xref: /linux/fs/btrfs/file.c (revision dadf03cfd4eaa09f1d0e8b2521de1e11d3e3bec1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/time.h>
9 #include <linux/init.h>
10 #include <linux/string.h>
11 #include <linux/backing-dev.h>
12 #include <linux/falloc.h>
13 #include <linux/writeback.h>
14 #include <linux/compat.h>
15 #include <linux/slab.h>
16 #include <linux/btrfs.h>
17 #include <linux/uio.h>
18 #include <linux/iversion.h>
19 #include <linux/fsverity.h>
20 #include "ctree.h"
21 #include "direct-io.h"
22 #include "disk-io.h"
23 #include "transaction.h"
24 #include "btrfs_inode.h"
25 #include "tree-log.h"
26 #include "locking.h"
27 #include "qgroup.h"
28 #include "compression.h"
29 #include "delalloc-space.h"
30 #include "reflink.h"
31 #include "subpage.h"
32 #include "fs.h"
33 #include "accessors.h"
34 #include "extent-tree.h"
35 #include "file-item.h"
36 #include "ioctl.h"
37 #include "file.h"
38 #include "super.h"
39 
40 /*
41  * Helper to fault in page and copy.  This should go away and be replaced with
42  * calls into generic code.
43  */
44 static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
45 					 struct folio *folio, struct iov_iter *i)
46 {
47 	size_t copied = 0;
48 	size_t total_copied = 0;
49 	int offset = offset_in_page(pos);
50 
51 	while (write_bytes > 0) {
52 		size_t count = min_t(size_t, PAGE_SIZE - offset, write_bytes);
53 		/*
54 		 * Copy data from userspace to the current folio.
55 		 */
56 		copied = copy_folio_from_iter_atomic(folio, offset, count, i);
57 
58 		/* Flush processor's dcache for this page */
59 		flush_dcache_folio(folio);
60 
61 		/*
62 		 * If we get a partial write, we can end up with a
63 		 * partially uptodate folio.  Those add a lot of
64 		 * complexity, so make sure they don't happen by
65 		 * forcing this copy to be retried.
66 		 *
67 		 * The rest of the btrfs_file_write code will fall
68 		 * back to page-at-a-time copies after we return 0.
69 		 */
70 		if (unlikely(copied < count)) {
71 			if (!folio_test_uptodate(folio)) {
72 				iov_iter_revert(i, copied);
73 				copied = 0;
74 			}
75 			if (!copied)
76 				break;
77 		}
78 
79 		write_bytes -= copied;
80 		total_copied += copied;
81 		offset += copied;
82 	}
83 	return total_copied;
84 }
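
/*
 * Illustrative sketch (not compiled): the contract of the copy helper
 * above, as consumed by btrfs_buffered_write() below.  A short copy into
 * a folio that is not uptodate is reverted and reported as 0, and the
 * caller is expected to retry with the folio forced uptodate:
 *
 *	copied = btrfs_copy_from_user(pos, write_bytes, folio, i);
 *	if (copied == 0)
 *		force_page_uptodate = true;	(then prepare and retry)
 */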
85 
86 /*
87  * Unlock folio after btrfs_file_write() is done with it.
88  */
89 static void btrfs_drop_folio(struct btrfs_fs_info *fs_info, struct folio *folio,
90 			     u64 pos, u64 copied)
91 {
92 	u64 block_start = round_down(pos, fs_info->sectorsize);
93 	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
94 
95 	ASSERT(block_len <= U32_MAX);
96 	/*
97 	 * Folio checked is some magic around finding folios that have been
98 	 * modified without going through btrfs_dirty_folio().  Clear it here.
99 	 * There should be no need to mark the folio accessed, as
100 	 * prepare_one_folio() already marked it accessed when it looked
101 	 * the folio up in the page cache.
102 	 */
103 	btrfs_folio_clamp_clear_checked(fs_info, folio, block_start, block_len);
104 	folio_unlock(folio);
105 	folio_put(folio);
106 }
107 
108 /*
109  * After btrfs_copy_from_user(), update the following things for delalloc:
110  * - Mark newly dirtied folio as DELALLOC in the io tree.
111  *   Used to advise which range is to be written back.
112  * - Mark modified folio as Uptodate/Dirty and not needing COW fixup
113  * - Update inode size for past EOF write
114  */
115 int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos,
116 		      size_t write_bytes, struct extent_state **cached, bool noreserve)
117 {
118 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
119 	int ret = 0;
120 	u64 num_bytes;
121 	u64 start_pos;
122 	u64 end_of_last_block;
123 	u64 end_pos = pos + write_bytes;
124 	loff_t isize = i_size_read(&inode->vfs_inode);
125 	unsigned int extra_bits = 0;
126 
127 	if (write_bytes == 0)
128 		return 0;
129 
130 	if (noreserve)
131 		extra_bits |= EXTENT_NORESERVE;
132 
133 	start_pos = round_down(pos, fs_info->sectorsize);
134 	num_bytes = round_up(write_bytes + pos - start_pos,
135 			     fs_info->sectorsize);
136 	ASSERT(num_bytes <= U32_MAX);
137 	ASSERT(folio_pos(folio) <= pos &&
138 	       folio_pos(folio) + folio_size(folio) >= pos + write_bytes);
139 
140 	end_of_last_block = start_pos + num_bytes - 1;
141 
142 	/*
143 	 * The pages may have already been dirty, clear out old accounting so
144 	 * we can set things up properly
145 	 */
146 	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
147 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
148 			 cached);
149 
150 	ret = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
151 					extra_bits, cached);
152 	if (ret)
153 		return ret;
154 
155 	btrfs_folio_clamp_set_uptodate(fs_info, folio, start_pos, num_bytes);
156 	btrfs_folio_clamp_clear_checked(fs_info, folio, start_pos, num_bytes);
157 	btrfs_folio_clamp_set_dirty(fs_info, folio, start_pos, num_bytes);
158 
159 	/*
160 	 * We've only changed i_size in RAM, and we haven't updated
161 	 * the disk i_size.  There is no need to log the inode
162 	 * at this time.
163 	 */
164 	if (end_pos > isize)
165 		i_size_write(&inode->vfs_inode, end_pos);
166 	return 0;
167 }
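
/*
 * Illustrative sketch (not compiled) of where btrfs_dirty_folio() sits in
 * the buffered write path: data is copied into a prepared, locked folio
 * first and only afterwards accounted as delalloc, roughly:
 *
 *	copied = btrfs_copy_from_user(pos, write_bytes, folio, i);
 *	ret = btrfs_dirty_folio(inode, folio, pos, copied, &cached_state,
 *				only_release_metadata);
 *	btrfs_drop_folio(fs_info, folio, pos, copied);
 */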
168 
169 /*
170  * This is very complex, but the basic idea is to drop all extents
171  * in the range start - end.  The range is passed in via args->start
172  * and args->end of the btrfs_drop_extents_args structure.
173  *
174  * If an extent intersects the range but is not entirely inside the range
175  * it is either truncated or split.  Anything entirely inside the range
176  * is deleted from the tree.
177  *
178  * Note: the VFS' inode number of bytes is not updated, it's up to the caller
179  * to deal with that. We set the field 'bytes_found' of the arguments structure
180  * with the number of allocated bytes found in the target range, so that the
181  * caller can update the inode's number of bytes in an atomic way when
182  * replacing extents in a range to avoid races with stat(2).
183  */
184 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
185 		       struct btrfs_root *root, struct btrfs_inode *inode,
186 		       struct btrfs_drop_extents_args *args)
187 {
188 	struct btrfs_fs_info *fs_info = root->fs_info;
189 	struct extent_buffer *leaf;
190 	struct btrfs_file_extent_item *fi;
191 	struct btrfs_key key;
192 	struct btrfs_key new_key;
193 	u64 ino = btrfs_ino(inode);
194 	u64 search_start = args->start;
195 	u64 disk_bytenr = 0;
196 	u64 num_bytes = 0;
197 	u64 extent_offset = 0;
198 	u64 extent_end = 0;
199 	u64 last_end = args->start;
200 	int del_nr = 0;
201 	int del_slot = 0;
202 	int extent_type;
203 	int recow;
204 	int ret;
205 	int modify_tree = -1;
206 	int update_refs;
207 	int found = 0;
208 	struct btrfs_path *path = args->path;
209 
210 	args->bytes_found = 0;
211 	args->extent_inserted = false;
212 
213 	/* Must always have a path if ->replace_extent is true */
214 	ASSERT(!(args->replace_extent && !args->path));
215 
216 	if (!path) {
217 		path = btrfs_alloc_path();
218 		if (!path) {
219 			ret = -ENOMEM;
220 			goto out;
221 		}
222 	}
223 
224 	if (args->drop_cache)
225 		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
226 
227 	if (args->start >= inode->disk_i_size && !args->replace_extent)
228 		modify_tree = 0;
229 
230 	update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
231 	while (1) {
232 		recow = 0;
233 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
234 					       search_start, modify_tree);
235 		if (ret < 0)
236 			break;
237 		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
238 			leaf = path->nodes[0];
239 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
240 			if (key.objectid == ino &&
241 			    key.type == BTRFS_EXTENT_DATA_KEY)
242 				path->slots[0]--;
243 		}
244 		ret = 0;
245 next_slot:
246 		leaf = path->nodes[0];
247 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
248 			BUG_ON(del_nr > 0);
249 			ret = btrfs_next_leaf(root, path);
250 			if (ret < 0)
251 				break;
252 			if (ret > 0) {
253 				ret = 0;
254 				break;
255 			}
256 			leaf = path->nodes[0];
257 			recow = 1;
258 		}
259 
260 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
261 
262 		if (key.objectid > ino)
263 			break;
264 		if (WARN_ON_ONCE(key.objectid < ino) ||
265 		    key.type < BTRFS_EXTENT_DATA_KEY) {
266 			ASSERT(del_nr == 0);
267 			path->slots[0]++;
268 			goto next_slot;
269 		}
270 		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
271 			break;
272 
273 		fi = btrfs_item_ptr(leaf, path->slots[0],
274 				    struct btrfs_file_extent_item);
275 		extent_type = btrfs_file_extent_type(leaf, fi);
276 
277 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
278 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
279 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
280 			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
281 			extent_offset = btrfs_file_extent_offset(leaf, fi);
282 			extent_end = key.offset +
283 				btrfs_file_extent_num_bytes(leaf, fi);
284 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
285 			extent_end = key.offset +
286 				btrfs_file_extent_ram_bytes(leaf, fi);
287 		} else {
288 			/* can't happen */
289 			BUG();
290 		}
291 
292 		/*
293 		 * Don't skip extent items representing 0 byte lengths. They
294 		 * used to be created (due to a bug) when punching holes hit an
295 		 * -ENOSPC condition. So if we find one here, just ensure we
296 		 * delete it, otherwise we would insert a new file extent item
297 		 * with the same key (offset) as that 0 bytes length file
298 		 * extent item in the call to setup_items_for_insert() later
299 		 * in this function.
300 		 */
301 		if (extent_end == key.offset && extent_end >= search_start) {
302 			last_end = extent_end;
303 			goto delete_extent_item;
304 		}
305 
306 		if (extent_end <= search_start) {
307 			path->slots[0]++;
308 			goto next_slot;
309 		}
310 
311 		found = 1;
312 		search_start = max(key.offset, args->start);
313 		if (recow || !modify_tree) {
314 			modify_tree = -1;
315 			btrfs_release_path(path);
316 			continue;
317 		}
318 
319 		/*
320 		 *     | - range to drop - |
321 		 *  | -------- extent -------- |
322 		 */
323 		if (args->start > key.offset && args->end < extent_end) {
324 			BUG_ON(del_nr > 0);
325 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
326 				ret = -EOPNOTSUPP;
327 				break;
328 			}
329 
330 			memcpy(&new_key, &key, sizeof(new_key));
331 			new_key.offset = args->start;
332 			ret = btrfs_duplicate_item(trans, root, path,
333 						   &new_key);
334 			if (ret == -EAGAIN) {
335 				btrfs_release_path(path);
336 				continue;
337 			}
338 			if (ret < 0)
339 				break;
340 
341 			leaf = path->nodes[0];
342 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
343 					    struct btrfs_file_extent_item);
344 			btrfs_set_file_extent_num_bytes(leaf, fi,
345 							args->start - key.offset);
346 
347 			fi = btrfs_item_ptr(leaf, path->slots[0],
348 					    struct btrfs_file_extent_item);
349 
350 			extent_offset += args->start - key.offset;
351 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
352 			btrfs_set_file_extent_num_bytes(leaf, fi,
353 							extent_end - args->start);
354 			btrfs_mark_buffer_dirty(trans, leaf);
355 
356 			if (update_refs && disk_bytenr > 0) {
357 				struct btrfs_ref ref = {
358 					.action = BTRFS_ADD_DELAYED_REF,
359 					.bytenr = disk_bytenr,
360 					.num_bytes = num_bytes,
361 					.parent = 0,
362 					.owning_root = btrfs_root_id(root),
363 					.ref_root = btrfs_root_id(root),
364 				};
365 				btrfs_init_data_ref(&ref, new_key.objectid,
366 						    args->start - extent_offset,
367 						    0, false);
368 				ret = btrfs_inc_extent_ref(trans, &ref);
369 				if (ret) {
370 					btrfs_abort_transaction(trans, ret);
371 					break;
372 				}
373 			}
374 			key.offset = args->start;
375 		}
376 		/*
377 		 * From here on out we will have actually dropped something, so
378 		 * last_end can be updated.
379 		 */
380 		last_end = extent_end;
381 
382 		/*
383 		 *  | ---- range to drop ----- |
384 		 *      | -------- extent -------- |
385 		 */
386 		if (args->start <= key.offset && args->end < extent_end) {
387 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
388 				ret = -EOPNOTSUPP;
389 				break;
390 			}
391 
392 			memcpy(&new_key, &key, sizeof(new_key));
393 			new_key.offset = args->end;
394 			btrfs_set_item_key_safe(trans, path, &new_key);
395 
396 			extent_offset += args->end - key.offset;
397 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
398 			btrfs_set_file_extent_num_bytes(leaf, fi,
399 							extent_end - args->end);
400 			btrfs_mark_buffer_dirty(trans, leaf);
401 			if (update_refs && disk_bytenr > 0)
402 				args->bytes_found += args->end - key.offset;
403 			break;
404 		}
405 
406 		search_start = extent_end;
407 		/*
408 		 *       | ---- range to drop ----- |
409 		 *  | -------- extent -------- |
410 		 */
411 		if (args->start > key.offset && args->end >= extent_end) {
412 			BUG_ON(del_nr > 0);
413 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
414 				ret = -EOPNOTSUPP;
415 				break;
416 			}
417 
418 			btrfs_set_file_extent_num_bytes(leaf, fi,
419 							args->start - key.offset);
420 			btrfs_mark_buffer_dirty(trans, leaf);
421 			if (update_refs && disk_bytenr > 0)
422 				args->bytes_found += extent_end - args->start;
423 			if (args->end == extent_end)
424 				break;
425 
426 			path->slots[0]++;
427 			goto next_slot;
428 		}
429 
430 		/*
431 		 *  | ---- range to drop ----- |
432 		 *    | ------ extent ------ |
433 		 */
434 		if (args->start <= key.offset && args->end >= extent_end) {
435 delete_extent_item:
436 			if (del_nr == 0) {
437 				del_slot = path->slots[0];
438 				del_nr = 1;
439 			} else {
440 				BUG_ON(del_slot + del_nr != path->slots[0]);
441 				del_nr++;
442 			}
443 
444 			if (update_refs &&
445 			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
446 				args->bytes_found += extent_end - key.offset;
447 				extent_end = ALIGN(extent_end,
448 						   fs_info->sectorsize);
449 			} else if (update_refs && disk_bytenr > 0) {
450 				struct btrfs_ref ref = {
451 					.action = BTRFS_DROP_DELAYED_REF,
452 					.bytenr = disk_bytenr,
453 					.num_bytes = num_bytes,
454 					.parent = 0,
455 					.owning_root = btrfs_root_id(root),
456 					.ref_root = btrfs_root_id(root),
457 				};
458 				btrfs_init_data_ref(&ref, key.objectid,
459 						    key.offset - extent_offset,
460 						    0, false);
461 				ret = btrfs_free_extent(trans, &ref);
462 				if (ret) {
463 					btrfs_abort_transaction(trans, ret);
464 					break;
465 				}
466 				args->bytes_found += extent_end - key.offset;
467 			}
468 
469 			if (args->end == extent_end)
470 				break;
471 
472 			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
473 				path->slots[0]++;
474 				goto next_slot;
475 			}
476 
477 			ret = btrfs_del_items(trans, root, path, del_slot,
478 					      del_nr);
479 			if (ret) {
480 				btrfs_abort_transaction(trans, ret);
481 				break;
482 			}
483 
484 			del_nr = 0;
485 			del_slot = 0;
486 
487 			btrfs_release_path(path);
488 			continue;
489 		}
490 
491 		BUG();
492 	}
493 
494 	if (!ret && del_nr > 0) {
495 		/*
496 		 * Set path->slots[0] to the first slot, so that after the delete,
497 		 * if items are moved off from our leaf to its immediate left or
498 		 * right neighbor leaves, we end up with a correct and adjusted
499 		 * path->slots[0] for our insertion (if args->replace_extent).
500 		 */
501 		path->slots[0] = del_slot;
502 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
503 		if (ret)
504 			btrfs_abort_transaction(trans, ret);
505 	}
506 
507 	leaf = path->nodes[0];
508 	/*
509 	 * If btrfs_del_items() was called, it might have deleted a leaf, in
510 	 * which case it unlocked our path, so check path->locks[0] matches a
511 	 * write lock.
512 	 */
513 	if (!ret && args->replace_extent &&
514 	    path->locks[0] == BTRFS_WRITE_LOCK &&
515 	    btrfs_leaf_free_space(leaf) >=
516 	    sizeof(struct btrfs_item) + args->extent_item_size) {
517 
518 		key.objectid = ino;
519 		key.type = BTRFS_EXTENT_DATA_KEY;
520 		key.offset = args->start;
521 		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
522 			struct btrfs_key slot_key;
523 
524 			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
525 			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
526 				path->slots[0]++;
527 		}
528 		btrfs_setup_item_for_insert(trans, root, path, &key,
529 					    args->extent_item_size);
530 		args->extent_inserted = true;
531 	}
532 
533 	if (!args->path)
534 		btrfs_free_path(path);
535 	else if (!args->extent_inserted)
536 		btrfs_release_path(path);
537 out:
538 	args->drop_end = found ? min(args->end, last_end) : args->end;
539 
540 	return ret;
541 }
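
/*
 * Illustrative sketch (not compiled): a typical caller drops all extents
 * in a range by filling a btrfs_drop_extents_args on the stack and
 * letting the function allocate its own path:
 *
 *	struct btrfs_drop_extents_args drop_args = { 0 };
 *
 *	drop_args.start = start;
 *	drop_args.end = end;
 *	drop_args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
 *
 * On success, drop_args.drop_end and drop_args.bytes_found describe what
 * was actually dropped, as documented above.
 */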
542 
543 static int extent_mergeable(struct extent_buffer *leaf, int slot,
544 			    u64 objectid, u64 bytenr, u64 orig_offset,
545 			    u64 *start, u64 *end)
546 {
547 	struct btrfs_file_extent_item *fi;
548 	struct btrfs_key key;
549 	u64 extent_end;
550 
551 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
552 		return 0;
553 
554 	btrfs_item_key_to_cpu(leaf, &key, slot);
555 	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
556 		return 0;
557 
558 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
559 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
560 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
561 	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
562 	    btrfs_file_extent_compression(leaf, fi) ||
563 	    btrfs_file_extent_encryption(leaf, fi) ||
564 	    btrfs_file_extent_other_encoding(leaf, fi))
565 		return 0;
566 
567 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
568 	if ((*start && *start != key.offset) || (*end && *end != extent_end))
569 		return 0;
570 
571 	*start = key.offset;
572 	*end = extent_end;
573 	return 1;
574 }
575 
576 /*
577  * Mark extent in the range start - end as written.
578  *
579  * This changes extent type from 'pre-allocated' to 'regular'. If only
580  * part of the extent is marked as written, the extent will be split into
581  * two or three.
582  */
583 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
584 			      struct btrfs_inode *inode, u64 start, u64 end)
585 {
586 	struct btrfs_root *root = inode->root;
587 	struct extent_buffer *leaf;
588 	struct btrfs_path *path;
589 	struct btrfs_file_extent_item *fi;
590 	struct btrfs_ref ref = { 0 };
591 	struct btrfs_key key;
592 	struct btrfs_key new_key;
593 	u64 bytenr;
594 	u64 num_bytes;
595 	u64 extent_end;
596 	u64 orig_offset;
597 	u64 other_start;
598 	u64 other_end;
599 	u64 split;
600 	int del_nr = 0;
601 	int del_slot = 0;
602 	int recow;
603 	int ret = 0;
604 	u64 ino = btrfs_ino(inode);
605 
606 	path = btrfs_alloc_path();
607 	if (!path)
608 		return -ENOMEM;
609 again:
610 	recow = 0;
611 	split = start;
612 	key.objectid = ino;
613 	key.type = BTRFS_EXTENT_DATA_KEY;
614 	key.offset = split;
615 
616 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
617 	if (ret < 0)
618 		goto out;
619 	if (ret > 0 && path->slots[0] > 0)
620 		path->slots[0]--;
621 
622 	leaf = path->nodes[0];
623 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
624 	if (key.objectid != ino ||
625 	    key.type != BTRFS_EXTENT_DATA_KEY) {
626 		ret = -EINVAL;
627 		btrfs_abort_transaction(trans, ret);
628 		goto out;
629 	}
630 	fi = btrfs_item_ptr(leaf, path->slots[0],
631 			    struct btrfs_file_extent_item);
632 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
633 		ret = -EINVAL;
634 		btrfs_abort_transaction(trans, ret);
635 		goto out;
636 	}
637 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
638 	if (key.offset > start || extent_end < end) {
639 		ret = -EINVAL;
640 		btrfs_abort_transaction(trans, ret);
641 		goto out;
642 	}
643 
644 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
645 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
646 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
647 	memcpy(&new_key, &key, sizeof(new_key));
648 
649 	if (start == key.offset && end < extent_end) {
650 		other_start = 0;
651 		other_end = start;
652 		if (extent_mergeable(leaf, path->slots[0] - 1,
653 				     ino, bytenr, orig_offset,
654 				     &other_start, &other_end)) {
655 			new_key.offset = end;
656 			btrfs_set_item_key_safe(trans, path, &new_key);
657 			fi = btrfs_item_ptr(leaf, path->slots[0],
658 					    struct btrfs_file_extent_item);
659 			btrfs_set_file_extent_generation(leaf, fi,
660 							 trans->transid);
661 			btrfs_set_file_extent_num_bytes(leaf, fi,
662 							extent_end - end);
663 			btrfs_set_file_extent_offset(leaf, fi,
664 						     end - orig_offset);
665 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
666 					    struct btrfs_file_extent_item);
667 			btrfs_set_file_extent_generation(leaf, fi,
668 							 trans->transid);
669 			btrfs_set_file_extent_num_bytes(leaf, fi,
670 							end - other_start);
671 			btrfs_mark_buffer_dirty(trans, leaf);
672 			goto out;
673 		}
674 	}
675 
676 	if (start > key.offset && end == extent_end) {
677 		other_start = end;
678 		other_end = 0;
679 		if (extent_mergeable(leaf, path->slots[0] + 1,
680 				     ino, bytenr, orig_offset,
681 				     &other_start, &other_end)) {
682 			fi = btrfs_item_ptr(leaf, path->slots[0],
683 					    struct btrfs_file_extent_item);
684 			btrfs_set_file_extent_num_bytes(leaf, fi,
685 							start - key.offset);
686 			btrfs_set_file_extent_generation(leaf, fi,
687 							 trans->transid);
688 			path->slots[0]++;
689 			new_key.offset = start;
690 			btrfs_set_item_key_safe(trans, path, &new_key);
691 
692 			fi = btrfs_item_ptr(leaf, path->slots[0],
693 					    struct btrfs_file_extent_item);
694 			btrfs_set_file_extent_generation(leaf, fi,
695 							 trans->transid);
696 			btrfs_set_file_extent_num_bytes(leaf, fi,
697 							other_end - start);
698 			btrfs_set_file_extent_offset(leaf, fi,
699 						     start - orig_offset);
700 			btrfs_mark_buffer_dirty(trans, leaf);
701 			goto out;
702 		}
703 	}
704 
705 	while (start > key.offset || end < extent_end) {
706 		if (key.offset == start)
707 			split = end;
708 
709 		new_key.offset = split;
710 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
711 		if (ret == -EAGAIN) {
712 			btrfs_release_path(path);
713 			goto again;
714 		}
715 		if (ret < 0) {
716 			btrfs_abort_transaction(trans, ret);
717 			goto out;
718 		}
719 
720 		leaf = path->nodes[0];
721 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
722 				    struct btrfs_file_extent_item);
723 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
724 		btrfs_set_file_extent_num_bytes(leaf, fi,
725 						split - key.offset);
726 
727 		fi = btrfs_item_ptr(leaf, path->slots[0],
728 				    struct btrfs_file_extent_item);
729 
730 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
731 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
732 		btrfs_set_file_extent_num_bytes(leaf, fi,
733 						extent_end - split);
734 		btrfs_mark_buffer_dirty(trans, leaf);
735 
736 		ref.action = BTRFS_ADD_DELAYED_REF;
737 		ref.bytenr = bytenr;
738 		ref.num_bytes = num_bytes;
739 		ref.parent = 0;
740 		ref.owning_root = btrfs_root_id(root);
741 		ref.ref_root = btrfs_root_id(root);
742 		btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
743 		ret = btrfs_inc_extent_ref(trans, &ref);
744 		if (ret) {
745 			btrfs_abort_transaction(trans, ret);
746 			goto out;
747 		}
748 
749 		if (split == start) {
750 			key.offset = start;
751 		} else {
752 			if (start != key.offset) {
753 				ret = -EINVAL;
754 				btrfs_abort_transaction(trans, ret);
755 				goto out;
756 			}
757 			path->slots[0]--;
758 			extent_end = end;
759 		}
760 		recow = 1;
761 	}
762 
763 	other_start = end;
764 	other_end = 0;
765 
766 	ref.action = BTRFS_DROP_DELAYED_REF;
767 	ref.bytenr = bytenr;
768 	ref.num_bytes = num_bytes;
769 	ref.parent = 0;
770 	ref.owning_root = btrfs_root_id(root);
771 	ref.ref_root = btrfs_root_id(root);
772 	btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
773 	if (extent_mergeable(leaf, path->slots[0] + 1,
774 			     ino, bytenr, orig_offset,
775 			     &other_start, &other_end)) {
776 		if (recow) {
777 			btrfs_release_path(path);
778 			goto again;
779 		}
780 		extent_end = other_end;
781 		del_slot = path->slots[0] + 1;
782 		del_nr++;
783 		ret = btrfs_free_extent(trans, &ref);
784 		if (ret) {
785 			btrfs_abort_transaction(trans, ret);
786 			goto out;
787 		}
788 	}
789 	other_start = 0;
790 	other_end = start;
791 	if (extent_mergeable(leaf, path->slots[0] - 1,
792 			     ino, bytenr, orig_offset,
793 			     &other_start, &other_end)) {
794 		if (recow) {
795 			btrfs_release_path(path);
796 			goto again;
797 		}
798 		key.offset = other_start;
799 		del_slot = path->slots[0];
800 		del_nr++;
801 		ret = btrfs_free_extent(trans, &ref);
802 		if (ret) {
803 			btrfs_abort_transaction(trans, ret);
804 			goto out;
805 		}
806 	}
807 	if (del_nr == 0) {
808 		fi = btrfs_item_ptr(leaf, path->slots[0],
809 			   struct btrfs_file_extent_item);
810 		btrfs_set_file_extent_type(leaf, fi,
811 					   BTRFS_FILE_EXTENT_REG);
812 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
813 		btrfs_mark_buffer_dirty(trans, leaf);
814 	} else {
815 		fi = btrfs_item_ptr(leaf, del_slot - 1,
816 			   struct btrfs_file_extent_item);
817 		btrfs_set_file_extent_type(leaf, fi,
818 					   BTRFS_FILE_EXTENT_REG);
819 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
820 		btrfs_set_file_extent_num_bytes(leaf, fi,
821 						extent_end - key.offset);
822 		btrfs_mark_buffer_dirty(trans, leaf);
823 
824 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
825 		if (ret < 0) {
826 			btrfs_abort_transaction(trans, ret);
827 			goto out;
828 		}
829 	}
830 out:
831 	btrfs_free_path(path);
832 	return ret;
833 }
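
/*
 * Illustrative sketch of what btrfs_mark_extent_written() does when the
 * written range lands in the middle of a preallocated extent and no
 * neighbor can be merged (not compiled):
 *
 *	before:  [ ............. prealloc ............. ]
 *	after:   [ prealloc ][ written: start-end ][ prealloc ]
 *
 * The middle item becomes BTRFS_FILE_EXTENT_REG while the outer pieces
 * stay BTRFS_FILE_EXTENT_PREALLOC, with an extent ref added for each new
 * item that still points at the same disk bytenr.
 */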
834 
835 /*
836  * On error return an unlocked folio and the error value.
837  * On success return a locked folio and 0.
838  */
839 static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64 pos,
840 				  u64 len, bool force_uptodate)
841 {
842 	u64 clamp_start = max_t(u64, pos, folio_pos(folio));
843 	u64 clamp_end = min_t(u64, pos + len, folio_pos(folio) + folio_size(folio));
844 	int ret = 0;
845 
846 	if (folio_test_uptodate(folio))
847 		return 0;
848 
849 	if (!force_uptodate &&
850 	    IS_ALIGNED(clamp_start, PAGE_SIZE) &&
851 	    IS_ALIGNED(clamp_end, PAGE_SIZE))
852 		return 0;
853 
854 	ret = btrfs_read_folio(NULL, folio);
855 	if (ret)
856 		return ret;
857 	folio_lock(folio);
858 	if (!folio_test_uptodate(folio)) {
859 		folio_unlock(folio);
860 		return -EIO;
861 	}
862 
863 	/*
864 	 * Since btrfs_read_folio() will unlock the folio before it returns,
865 	 * there is a window where btrfs_release_folio() can be called to
866 	 * release the folio.  Here we check both the inode mapping and the
867 	 * folio private flag to make sure the folio was not released.
868 	 *
869 	 * The private flag check is essential for subpage, as we need to
870 	 * store an extra bitmap using the folio's private data.
871 	 */
872 	if (folio->mapping != inode->i_mapping || !folio_test_private(folio)) {
873 		folio_unlock(folio);
874 		return -EAGAIN;
875 	}
876 	return 0;
877 }
878 
879 static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
880 {
881 	gfp_t gfp;
882 
883 	gfp = btrfs_alloc_write_mask(inode->i_mapping);
884 	if (nowait) {
885 		gfp &= ~__GFP_DIRECT_RECLAIM;
886 		gfp |= GFP_NOWAIT;
887 	}
888 
889 	return gfp;
890 }
891 
892 /*
893  * Get folio into the page cache and lock it.
894  */
895 static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_ret,
896 				      loff_t pos, size_t write_bytes,
897 				      bool force_uptodate, bool nowait)
898 {
899 	unsigned long index = pos >> PAGE_SHIFT;
900 	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
901 	fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN);
902 	struct folio *folio;
903 	int ret = 0;
904 
905 again:
906 	folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags, mask);
907 	if (IS_ERR(folio)) {
908 		if (nowait)
909 			ret = -EAGAIN;
910 		else
911 			ret = PTR_ERR(folio);
912 		return ret;
913 	}
914 	folio_wait_writeback(folio);
915 	/* Only page sized folios are supported for now. */
916 	ASSERT(folio_order(folio) == 0);
917 	ret = set_folio_extent_mapped(folio);
918 	if (ret < 0) {
919 		folio_unlock(folio);
920 		folio_put(folio);
921 		return ret;
922 	}
923 	ret = prepare_uptodate_folio(inode, folio, pos, write_bytes, force_uptodate);
924 	if (ret) {
925 		/* The folio is already unlocked. */
926 		folio_put(folio);
927 		if (!nowait && ret == -EAGAIN) {
928 			ret = 0;
929 			goto again;
930 		}
931 		return ret;
932 	}
933 	*folio_ret = folio;
934 	return 0;
935 }
936 
937 /*
938  * Locks the extent and properly waits for data=ordered extents to finish
939  * before allowing the folios to be modified if needed.
940  *
941  * Return:
942  * 1 - the extent is locked
943  * 0 - the extent is not locked, and everything is OK
944  * -EAGAIN - need to prepare the folios again
945  */
946 static noinline int
947 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
948 				loff_t pos, size_t write_bytes,
949 				u64 *lockstart, u64 *lockend, bool nowait,
950 				struct extent_state **cached_state)
951 {
952 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
953 	u64 start_pos;
954 	u64 last_pos;
955 	int ret = 0;
956 
957 	start_pos = round_down(pos, fs_info->sectorsize);
958 	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
959 
960 	if (start_pos < inode->vfs_inode.i_size) {
961 		struct btrfs_ordered_extent *ordered;
962 
963 		if (nowait) {
964 			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
965 					     cached_state)) {
966 				folio_unlock(folio);
967 				folio_put(folio);
968 				return -EAGAIN;
969 			}
970 		} else {
971 			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
972 		}
973 
974 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
975 						     last_pos - start_pos + 1);
976 		if (ordered &&
977 		    ordered->file_offset + ordered->num_bytes > start_pos &&
978 		    ordered->file_offset <= last_pos) {
979 			unlock_extent(&inode->io_tree, start_pos, last_pos,
980 				      cached_state);
981 			folio_unlock(folio);
982 			folio_put(folio);
983 			btrfs_start_ordered_extent(ordered);
984 			btrfs_put_ordered_extent(ordered);
985 			return -EAGAIN;
986 		}
987 		if (ordered)
988 			btrfs_put_ordered_extent(ordered);
989 
990 		*lockstart = start_pos;
991 		*lockend = last_pos;
992 		ret = 1;
993 	}
994 
995 	/*
996 	 * We should be called after prepare_one_folio(), which should have
997 	 * locked the folio for us.
998 	 */
999 	WARN_ON(!folio_test_locked(folio));
1000 
1001 	return ret;
1002 }
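
/*
 * Illustrative sketch (not compiled) of how the tri-state return value
 * above is consumed by btrfs_buffered_write():
 *
 *	extents_locked = lock_and_cleanup_extent_if_need(inode, folio, pos,
 *				write_bytes, &lockstart, &lockend, nowait,
 *				&cached_state);
 *	if (extents_locked < 0) {
 *		if (!nowait && extents_locked == -EAGAIN)
 *			goto again;	(prepare the folio again)
 *		... bail out ...
 *	}
 *	... copy the data ...
 *	if (extents_locked)
 *		unlock_extent(&inode->io_tree, lockstart, lockend,
 *			      &cached_state);
 */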
1003 
1004 /*
1005  * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1006  *
1007  * @pos:         File offset.
1008  * @write_bytes: The length to write, will be updated to the nocow writeable
1009  *               range.
1010  *
1011  * This function will flush ordered extents in the range to ensure proper
1012  * nocow checks.
1013  *
1014  * Return:
1015  * > 0          If we can nocow, and updates @write_bytes.
1016  *  0           If we can't do a nocow write.
1017  * -EAGAIN      If we can't do a nocow write because snapshotting of the inode's
1018  *              root is in progress.
1019  * < 0          If an error happened.
1020  *
1021  * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
1022  */
1023 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1024 			   size_t *write_bytes, bool nowait)
1025 {
1026 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1027 	struct btrfs_root *root = inode->root;
1028 	struct extent_state *cached_state = NULL;
1029 	u64 lockstart, lockend;
1030 	u64 num_bytes;
1031 	int ret;
1032 
1033 	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1034 		return 0;
1035 
1036 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
1037 		return -EAGAIN;
1038 
1039 	lockstart = round_down(pos, fs_info->sectorsize);
1040 	lockend = round_up(pos + *write_bytes,
1041 			   fs_info->sectorsize) - 1;
1042 	num_bytes = lockend - lockstart + 1;
1043 
1044 	if (nowait) {
1045 		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
1046 						  &cached_state)) {
1047 			btrfs_drew_write_unlock(&root->snapshot_lock);
1048 			return -EAGAIN;
1049 		}
1050 	} else {
1051 		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
1052 						   &cached_state);
1053 	}
1054 	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1055 			       NULL, nowait, false);
1056 	if (ret <= 0)
1057 		btrfs_drew_write_unlock(&root->snapshot_lock);
1058 	else
1059 		*write_bytes = min_t(size_t, *write_bytes,
1060 				     num_bytes - pos + lockstart);
1061 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
1062 
1063 	return ret;
1064 }
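
/*
 * Illustrative sketch (not compiled) of the pairing required by the NOTE
 * above: a positive return from btrfs_check_nocow_lock() must be balanced
 * with btrfs_check_nocow_unlock() once the nocow write is done:
 *
 *	can_nocow = btrfs_check_nocow_lock(inode, pos, &write_bytes, nowait);
 *	if (can_nocow > 0) {
 *		... do the nocow write, with a metadata-only reservation ...
 *		btrfs_check_nocow_unlock(inode);
 *	}
 */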
1065 
1066 void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1067 {
1068 	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1069 }
1070 
1071 static void update_time_for_write(struct inode *inode)
1072 {
1073 	struct timespec64 now, ts;
1074 
1075 	if (IS_NOCMTIME(inode))
1076 		return;
1077 
1078 	now = current_time(inode);
1079 	ts = inode_get_mtime(inode);
1080 	if (!timespec64_equal(&ts, &now))
1081 		inode_set_mtime_to_ts(inode, now);
1082 
1083 	ts = inode_get_ctime(inode);
1084 	if (!timespec64_equal(&ts, &now))
1085 		inode_set_ctime_to_ts(inode, now);
1086 
1087 	if (IS_I_VERSION(inode))
1088 		inode_inc_iversion(inode);
1089 }
1090 
1091 int btrfs_write_check(struct kiocb *iocb, size_t count)
1092 {
1093 	struct file *file = iocb->ki_filp;
1094 	struct inode *inode = file_inode(file);
1095 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1096 	loff_t pos = iocb->ki_pos;
1097 	int ret;
1098 	loff_t oldsize;
1099 	loff_t start_pos;
1100 
1101 	/*
1102 	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
1103 	 * prealloc flags, as without those flags we always have to COW. We will
1104 	 * later check if we can really NOCOW into the target range (using
1105 	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
1106 	 */
1107 	if ((iocb->ki_flags & IOCB_NOWAIT) &&
1108 	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1109 		return -EAGAIN;
1110 
1111 	ret = file_remove_privs(file);
1112 	if (ret)
1113 		return ret;
1114 
1115 	/*
1116 	 * We reserve space for updating the inode when we reserve space for the
1117 	 * extent we are going to write, so we will hit ENOSPC there.  We don't
1118 	 * need to start yet another transaction to update the inode as we will
1119 	 * update the inode when we finish writing whatever data we write.
1120 	 */
1121 	update_time_for_write(inode);
1122 
1123 	start_pos = round_down(pos, fs_info->sectorsize);
1124 	oldsize = i_size_read(inode);
1125 	if (start_pos > oldsize) {
1126 		/* Expand the hole to cover the write data, preventing an empty gap */
1127 		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1128 
1129 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1130 		if (ret)
1131 			return ret;
1132 	}
1133 
1134 	return 0;
1135 }
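
/*
 * Illustrative sketch (not compiled): for a write far past the current
 * i_size, btrfs_write_check() first expands the hole.  With a 4K sector
 * size, i_size == 4096 and a write at pos == 1M:
 *
 *	start_pos = round_down(1M, 4096);	(> oldsize, so expand)
 *	btrfs_cont_expand(inode, 4096, round_up(1M + count, 4096));
 */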
1136 
1137 ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
1138 {
1139 	struct file *file = iocb->ki_filp;
1140 	loff_t pos;
1141 	struct inode *inode = file_inode(file);
1142 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1143 	struct extent_changeset *data_reserved = NULL;
1144 	u64 release_bytes = 0;
1145 	u64 lockstart;
1146 	u64 lockend;
1147 	size_t num_written = 0;
1148 	ssize_t ret;
1149 	loff_t old_isize = i_size_read(inode);
1150 	unsigned int ilock_flags = 0;
1151 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
1152 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
1153 	bool only_release_metadata = false;
1154 
1155 	if (nowait)
1156 		ilock_flags |= BTRFS_ILOCK_TRY;
1157 
1158 	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
1159 	if (ret < 0)
1160 		return ret;
1161 
1162 	ret = generic_write_checks(iocb, i);
1163 	if (ret <= 0)
1164 		goto out;
1165 
1166 	ret = btrfs_write_check(iocb, ret);
1167 	if (ret < 0)
1168 		goto out;
1169 
1170 	pos = iocb->ki_pos;
1171 	while (iov_iter_count(i) > 0) {
1172 		struct extent_state *cached_state = NULL;
1173 		size_t offset = offset_in_page(pos);
1174 		size_t sector_offset;
1175 		size_t write_bytes = min(iov_iter_count(i), PAGE_SIZE - offset);
1176 		size_t reserve_bytes;
1177 		size_t copied;
1178 		size_t dirty_sectors;
1179 		size_t num_sectors;
1180 		struct folio *folio = NULL;
1181 		int extents_locked;
1182 		bool force_page_uptodate = false;
1183 
1184 		/*
1185 		 * Fault pages before locking them in prepare_one_folio()
1186 		 * to avoid a recursive lock.
1187 		 */
1188 		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
1189 			ret = -EFAULT;
1190 			break;
1191 		}
1192 
1193 		only_release_metadata = false;
1194 		sector_offset = pos & (fs_info->sectorsize - 1);
1195 
1196 		extent_changeset_release(data_reserved);
1197 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
1198 						  &data_reserved, pos,
1199 						  write_bytes, nowait);
1200 		if (ret < 0) {
1201 			int can_nocow;
1202 
1203 			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
1204 				ret = -EAGAIN;
1205 				break;
1206 			}
1207 
1208 			/*
1209 			 * If we don't have to COW at the offset, reserve
1210 			 * metadata only. write_bytes may get smaller than
1211 			 * requested here.
1212 			 */
1213 			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1214 							   &write_bytes, nowait);
1215 			if (can_nocow < 0)
1216 				ret = can_nocow;
1217 			if (can_nocow > 0)
1218 				ret = 0;
1219 			if (ret)
1220 				break;
1221 			only_release_metadata = true;
1222 		}
1223 
1224 		reserve_bytes = round_up(write_bytes + sector_offset,
1225 					 fs_info->sectorsize);
1226 		WARN_ON(reserve_bytes == 0);
1227 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1228 						      reserve_bytes,
1229 						      reserve_bytes, nowait);
1230 		if (ret) {
1231 			if (!only_release_metadata)
1232 				btrfs_free_reserved_data_space(BTRFS_I(inode),
1233 						data_reserved, pos,
1234 						write_bytes);
1235 			else
1236 				btrfs_check_nocow_unlock(BTRFS_I(inode));
1237 
1238 			if (nowait && ret == -ENOSPC)
1239 				ret = -EAGAIN;
1240 			break;
1241 		}
1242 
1243 		release_bytes = reserve_bytes;
1244 again:
1245 		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
1246 		if (ret) {
1247 			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1248 			break;
1249 		}
1250 
1251 		ret = prepare_one_folio(inode, &folio, pos, write_bytes,
1252 					force_page_uptodate, false);
1253 		if (ret) {
1254 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1255 						       reserve_bytes);
1256 			break;
1257 		}
1258 
1259 		extents_locked = lock_and_cleanup_extent_if_need(BTRFS_I(inode),
1260 						folio, pos, write_bytes, &lockstart,
1261 						&lockend, nowait, &cached_state);
1262 		if (extents_locked < 0) {
1263 			if (!nowait && extents_locked == -EAGAIN)
1264 				goto again;
1265 
1266 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1267 						       reserve_bytes);
1268 			ret = extents_locked;
1269 			break;
1270 		}
1271 
1272 		copied = btrfs_copy_from_user(pos, write_bytes, folio, i);
1273 
1274 		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1275 		dirty_sectors = round_up(copied + sector_offset,
1276 					fs_info->sectorsize);
1277 		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1278 
1279 		if (copied == 0) {
1280 			force_page_uptodate = true;
1281 			dirty_sectors = 0;
1282 		} else {
1283 			force_page_uptodate = false;
1284 		}
1285 
1286 		if (num_sectors > dirty_sectors) {
1287 			/* release everything except the sectors we dirtied */
1288 			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1289 			if (only_release_metadata) {
1290 				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1291 							release_bytes, true);
1292 			} else {
1293 				u64 release_start = round_up(pos + copied,
1294 							     fs_info->sectorsize);
1295 				btrfs_delalloc_release_space(BTRFS_I(inode),
1296 						data_reserved, release_start,
1297 						release_bytes, true);
1298 			}
1299 		}
1300 
1301 		release_bytes = round_up(copied + sector_offset,
1302 					fs_info->sectorsize);
1303 
1304 		ret = btrfs_dirty_folio(BTRFS_I(inode), folio, pos, copied,
1305 					&cached_state, only_release_metadata);
1306 
1307 		/*
1308 		 * If we have not locked the extent range, because the range's
1309 		 * start offset is >= i_size, we might still have a non-NULL
1310 		 * cached extent state, acquired while marking the extent range
1311 		 * as delalloc through btrfs_dirty_folio(). Therefore free any
1312 		 * possible cached extent state to avoid a memory leak.
1313 		 */
1314 		if (extents_locked)
1315 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
1316 				      lockend, &cached_state);
1317 		else
1318 			free_extent_state(cached_state);
1319 
1320 		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1321 		if (ret) {
1322 			btrfs_drop_folio(fs_info, folio, pos, copied);
1323 			break;
1324 		}
1325 
1326 		release_bytes = 0;
1327 		if (only_release_metadata)
1328 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1329 
1330 		btrfs_drop_folio(fs_info, folio, pos, copied);
1331 
1332 		cond_resched();
1333 
1334 		pos += copied;
1335 		num_written += copied;
1336 	}
1337 
1338 	if (release_bytes) {
1339 		if (only_release_metadata) {
1340 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1341 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1342 					release_bytes, true);
1343 		} else {
1344 			btrfs_delalloc_release_space(BTRFS_I(inode),
1345 					data_reserved,
1346 					round_down(pos, fs_info->sectorsize),
1347 					release_bytes, true);
1348 		}
1349 	}
1350 
1351 	extent_changeset_free(data_reserved);
1352 	if (num_written > 0) {
1353 		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1354 		iocb->ki_pos += num_written;
1355 	}
1356 out:
1357 	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1358 	return num_written ? num_written : ret;
1359 }
1360 
1361 static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
1362 			const struct btrfs_ioctl_encoded_io_args *encoded)
1363 {
1364 	struct file *file = iocb->ki_filp;
1365 	struct inode *inode = file_inode(file);
1366 	loff_t count;
1367 	ssize_t ret;
1368 
1369 	btrfs_inode_lock(BTRFS_I(inode), 0);
1370 	count = encoded->len;
1371 	ret = generic_write_checks_count(iocb, &count);
1372 	if (ret == 0 && count != encoded->len) {
1373 		/*
1374 		 * The write got truncated by generic_write_checks_count(). We
1375 		 * can't do a partial encoded write.
1376 		 */
1377 		ret = -EFBIG;
1378 	}
1379 	if (ret || encoded->len == 0)
1380 		goto out;
1381 
1382 	ret = btrfs_write_check(iocb, encoded->len);
1383 	if (ret < 0)
1384 		goto out;
1385 
1386 	ret = btrfs_do_encoded_write(iocb, from, encoded);
1387 out:
1388 	btrfs_inode_unlock(BTRFS_I(inode), 0);
1389 	return ret;
1390 }
1391 
1392 ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
1393 			    const struct btrfs_ioctl_encoded_io_args *encoded)
1394 {
1395 	struct file *file = iocb->ki_filp;
1396 	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
1397 	ssize_t num_written, num_sync;
1398 
1399 	/*
1400 	 * If the fs flips readonly due to some supposedly impossible error,
1401 	 * although we have opened a file as writable, we have to stop this
1402 	 * write operation to ensure consistency.
1403 	 */
1404 	if (BTRFS_FS_ERROR(inode->root->fs_info))
1405 		return -EROFS;
1406 
1407 	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
1408 		return -EOPNOTSUPP;
1409 
1410 	if (encoded) {
1411 		num_written = btrfs_encoded_write(iocb, from, encoded);
1412 		num_sync = encoded->len;
1413 	} else if (iocb->ki_flags & IOCB_DIRECT) {
1414 		num_written = btrfs_direct_write(iocb, from);
1415 		num_sync = num_written;
1416 	} else {
1417 		num_written = btrfs_buffered_write(iocb, from);
1418 		num_sync = num_written;
1419 	}
1420 
1421 	btrfs_set_inode_last_sub_trans(inode);
1422 
1423 	if (num_sync > 0) {
1424 		num_sync = generic_write_sync(iocb, num_sync);
1425 		if (num_sync < 0)
1426 			num_written = num_sync;
1427 	}
1428 
1429 	return num_written;
1430 }
1431 
1432 static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1433 {
1434 	return btrfs_do_write_iter(iocb, from, NULL);
1435 }
1436 
1437 int btrfs_release_file(struct inode *inode, struct file *filp)
1438 {
1439 	struct btrfs_file_private *private = filp->private_data;
1440 
1441 	if (private) {
1442 		kfree(private->filldir_buf);
1443 		free_extent_state(private->llseek_cached_state);
1444 		kfree(private);
1445 		filp->private_data = NULL;
1446 	}
1447 
1448 	/*
1449 	 * Set by setattr when we are about to truncate a file from a non-zero
1450 	 * size to a zero size.  This tries to flush down new bytes that may
1451 	 * have been written if the application were using truncate to replace
1452 	 * a file in place.
1453 	 */
1454 	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
1455 			       &BTRFS_I(inode)->runtime_flags))
1456 		filemap_flush(inode->i_mapping);
1457 	return 0;
1458 }
1459 
1460 static int start_ordered_ops(struct btrfs_inode *inode, loff_t start, loff_t end)
1461 {
1462 	int ret;
1463 	struct blk_plug plug;
1464 
1465 	/*
1466 	 * This is only called in fsync, which would do synchronous writes, so
1467 	 * a plug can merge adjacent IOs as much as possible.  Especially in
1468 	 * the case of multiple disks using a RAID profile, a large IO can be
1469 	 * split into several segments of stripe length (currently 64K).
1470 	 */
1471 	blk_start_plug(&plug);
1472 	ret = btrfs_fdatawrite_range(inode, start, end);
1473 	blk_finish_plug(&plug);
1474 
1475 	return ret;
1476 }
1477 
1478 static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
1479 {
1480 	struct btrfs_inode *inode = ctx->inode;
1481 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1482 
1483 	if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
1484 	    list_empty(&ctx->ordered_extents))
1485 		return true;
1486 
1487 	/*
1488 	 * If we are doing a fast fsync we can not bail out if the inode's
1489 	 * last_trans is <= the last committed transaction, because we only
1490 	 * update the last_trans of the inode during ordered extent completion,
1491 	 * and for a fast fsync we don't wait for that, we only wait for the
1492 	 * writeback to complete.
1493 	 */
1494 	if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
1495 	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
1496 	     list_empty(&ctx->ordered_extents)))
1497 		return true;
1498 
1499 	return false;
1500 }
1501 
1502 /*
1503  * fsync call for both files and directories.  This logs the inode into
1504  * the tree log instead of forcing full commits whenever possible.
1505  *
1506  * It needs to call filemap_fdatawait() so that all ordered extent updates
1507  * in the metadata btree are up to date for copying to the log.
1508  *
1509  * It drops the inode mutex before doing the tree log commit.  This is an
1510  * important optimization for directories because holding the mutex prevents
1511  * new operations on the dir while we write to disk.
1512  */
1513 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1514 {
1515 	struct dentry *dentry = file_dentry(file);
1516 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
1517 	struct btrfs_root *root = inode->root;
1518 	struct btrfs_fs_info *fs_info = root->fs_info;
1519 	struct btrfs_trans_handle *trans;
1520 	struct btrfs_log_ctx ctx;
1521 	int ret = 0, err;
1522 	u64 len;
1523 	bool full_sync;
1524 	bool skip_ilock = false;
1525 
1526 	if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
1527 		skip_ilock = true;
1528 		current->journal_info = NULL;
1529 		btrfs_assert_inode_locked(inode);
1530 	}
1531 
1532 	trace_btrfs_sync_file(file, datasync);
1533 
1534 	btrfs_init_log_ctx(&ctx, inode);
1535 
1536 	/*
1537 	 * Always set the range to a full range, otherwise we can get into
1538 	 * several problems, from missing file extent items to represent holes
1539 	 * when not using the NO_HOLES feature, to log tree corruption due to
1540 	 * races between hole detection during logging and completion of ordered
1541 	 * extents outside the range, to missing checksums due to ordered extents
1542 	 * for which we flushed only a subset of their pages.
1543 	 */
1544 	start = 0;
1545 	end = LLONG_MAX;
1546 	len = (u64)LLONG_MAX + 1;
1547 
1548 	/*
1549 	 * We write the dirty pages in the range and wait until they complete
1550 	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
1551 	 * pages concurrently and improve performance.  See
1552 	 * btrfs_wait_ordered_range() for an explanation of the ASYNC check.
1553 	 */
1554 	ret = start_ordered_ops(inode, start, end);
1555 	if (ret)
1556 		goto out;
1557 
1558 	if (skip_ilock)
1559 		down_write(&inode->i_mmap_lock);
1560 	else
1561 		btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
1562 
1563 	atomic_inc(&root->log_batch);
1564 
1565 	/*
1566 	 * Before we acquired the inode's lock and the mmap lock, someone may
1567 	 * have dirtied more pages in the target range. We need to make sure
1568 	 * that writeback for any such pages does not start while we are logging
1569 	 * the inode, because if it does, any of the following might happen when
1570 	 * we are not doing a full inode sync:
1571 	 *
1572 	 * 1) We log an extent after its writeback finishes but before its
1573 	 *    checksums are added to the csum tree, leading to -EIO errors
1574 	 *    when attempting to read the extent after a log replay.
1575 	 *
1576 	 * 2) We can end up logging an extent before its writeback finishes.
1577 	 *    Therefore after the log replay we will have a file extent item
1578 	 *    pointing to an unwritten extent (and no data checksums as well).
1579 	 *
1580 	 * So trigger writeback for any eventual new dirty pages and then we
1581 	 * wait for all ordered extents to complete below.
1582 	 */
1583 	ret = start_ordered_ops(inode, start, end);
1584 	if (ret) {
1585 		if (skip_ilock)
1586 			up_write(&inode->i_mmap_lock);
1587 		else
1588 			btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
1589 		goto out;
1590 	}
1591 
1592 	/*
1593 	 * Always check for the full sync flag while holding the inode's lock,
1594 	 * to avoid races with other tasks. The flag must be either set all the
1595 	 * time during logging or always off all the time while logging.
1596 	 * We check the flag here after starting delalloc above, because when
1597 	 * running delalloc the full sync flag may be set if we need to drop
1598 	 * extra extent map ranges due to temporary memory allocation failures.
1599 	 */
1600 	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
1601 
1602 	/*
1603 	 * We have to do this here to avoid the priority inversion of waiting on
1604 	 * IO of a lower priority task while holding a transaction open.
1605 	 *
1606 	 * For a full fsync we wait for the ordered extents to complete while
1607 	 * for a fast fsync we wait just for writeback to complete, and then
1608 	 * attach the ordered extents to the transaction so that a transaction
1609 	 * commit waits for their completion, to avoid data loss if we fsync,
1610 	 * the current transaction commits before the ordered extents complete
1611 	 * and a power failure happens right after that.
1612 	 *
1613 	 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
1614 	 * logical address recorded in the ordered extent may change. We need
1615 	 * to wait for the IO to stabilize the logical address.
1616 	 */
1617 	if (full_sync || btrfs_is_zoned(fs_info)) {
1618 		ret = btrfs_wait_ordered_range(inode, start, len);
1619 		clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
1620 	} else {
1621 		/*
1622 		 * Get our ordered extents as soon as possible to avoid doing
1623 		 * checksum lookups in the csum tree, and use instead the
1624 		 * checksums attached to the ordered extents.
1625 		 */
1626 		btrfs_get_ordered_extents_for_logging(inode, &ctx.ordered_extents);
1627 		ret = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, end);
1628 		if (ret)
1629 			goto out_release_extents;
1630 
1631 		/*
1632 		 * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after
1633 		 * starting and waiting for writeback, because for buffered IO
1634 		 * it may have been set during the end IO callback
1635 		 * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in
1636 		 * case an error happened and we need to wait for ordered
1637 		 * extents to complete so that any extent maps that point to
1638 		 * unwritten locations are dropped and we don't log them.
1639 		 */
1640 		if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags))
1641 			ret = btrfs_wait_ordered_range(inode, start, len);
1642 	}
1643 
1644 	if (ret)
1645 		goto out_release_extents;
1646 
1647 	atomic_inc(&root->log_batch);
1648 
1649 	if (skip_inode_logging(&ctx)) {
1650 		/*
1651 		 * We've had everything committed since the last time we were
1652 		 * modified so clear this flag in case it was set for whatever
1653 		 * reason, it's no longer relevant.
1654 		 */
1655 		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
1656 		/*
1657 		 * An ordered extent might have started before and completed
1658 		 * already with io errors, in which case the inode was not
1659 		 * updated and we end up here. So check the inode's mapping
1660 		 * for any errors that might have happened since the last
1661 		 * time fsync was called.
1662 		 */
1663 		ret = filemap_check_wb_err(inode->vfs_inode.i_mapping, file->f_wb_err);
1664 		goto out_release_extents;
1665 	}
1666 
1667 	btrfs_init_log_ctx_scratch_eb(&ctx);
1668 
1669 	/*
1670 	 * We use start here because we will need to wait on the IO to complete
1671 	 * in btrfs_sync_log, which could require joining a transaction (for
1672 	 * example checking cross references in the nocow path).  If we use join
1673 	 * here we could get into a situation where we're waiting on IO to
1674 	 * happen that is blocked on a transaction trying to commit.  With start
1675 	 * we inc the extwriter counter, so we wait for all extwriters to exit
1676 	 * before we start blocking joiners.  This comment is to keep somebody
1677 	 * from thinking they are super smart and changing this to
1678 	 * btrfs_join_transaction *cough*Josef*cough*.
1679 	 */
1680 	trans = btrfs_start_transaction(root, 0);
1681 	if (IS_ERR(trans)) {
1682 		ret = PTR_ERR(trans);
1683 		goto out_release_extents;
1684 	}
1685 	trans->in_fsync = true;
1686 
1687 	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
1688 	/*
1689 	 * The scratch eb is no longer needed, so release it before syncing the
1690 	 * log or committing the transaction, to avoid holding unnecessary
1691 	 * memory during such long operations.
1692 	 */
1693 	if (ctx.scratch_eb) {
1694 		free_extent_buffer(ctx.scratch_eb);
1695 		ctx.scratch_eb = NULL;
1696 	}
1697 	btrfs_release_log_ctx_extents(&ctx);
1698 	if (ret < 0) {
1699 		/* Fallthrough and commit/free transaction. */
1700 		ret = BTRFS_LOG_FORCE_COMMIT;
1701 	}
1702 
1703 	/*
1704 	 * We've logged all the items and now have a consistent version of
1705 	 * the file in the log.  It is possible that someone will come in
1706 	 * and modify the file, but that's fine because the log is consistent
1707 	 * on disk, and we have references to all of the file's extents.
1708 	 *
1709 	 * It is possible that someone will come in and log the file again,
1710 	 * but that will end up using the synchronization inside
1711 	 * btrfs_sync_log() to keep things safe.
1712 	 */
1713 	if (skip_ilock)
1714 		up_write(&inode->i_mmap_lock);
1715 	else
1716 		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
1717 
1718 	if (ret == BTRFS_NO_LOG_SYNC) {
1719 		ret = btrfs_end_transaction(trans);
1720 		goto out;
1721 	}
1722 
1723 	/* We successfully logged the inode, attempt to sync the log. */
1724 	if (!ret) {
1725 		ret = btrfs_sync_log(trans, root, &ctx);
1726 		if (!ret) {
1727 			ret = btrfs_end_transaction(trans);
1728 			goto out;
1729 		}
1730 	}
1731 
1732 	/*
1733 	 * At this point we need to commit the transaction because we had
1734 	 * btrfs_need_log_full_commit() or some other error.
1735 	 *
1736 	 * If we didn't do a full sync we have to stop the trans handle, wait on
1737 	 * the ordered extents, start it again and commit the transaction.  If
1738 	 * we attempt to wait on the ordered extents here we could deadlock with
1739 	 * something like fallocate() that is holding the extent lock trying to
1740 	 * start a transaction while some other thread is trying to commit the
1741 	 * transaction while we (fsync) are currently holding the transaction
1742 	 * open.
1743 	 */
1744 	if (!full_sync) {
1745 		ret = btrfs_end_transaction(trans);
1746 		if (ret)
1747 			goto out;
1748 		ret = btrfs_wait_ordered_range(inode, start, len);
1749 		if (ret)
1750 			goto out;
1751 
1752 		/*
1753 		 * This is safe to use here because we're only interested in
1754 		 * making sure the transaction that had the ordered extents is
1755 		 * committed.  We aren't waiting on anything past this point,
1756 		 * we're purely getting the transaction and committing it.
1757 		 */
1758 		trans = btrfs_attach_transaction_barrier(root);
1759 		if (IS_ERR(trans)) {
1760 			ret = PTR_ERR(trans);
1761 
1762 			/*
1763 			 * We committed the transaction and there's no currently
1764 			 * running transaction, this means everything we care
1765 			 * about made it to disk and we are done.
1766 			 */
1767 			if (ret == -ENOENT)
1768 				ret = 0;
1769 			goto out;
1770 		}
1771 	}
1772 
1773 	ret = btrfs_commit_transaction(trans);
1774 out:
1775 	free_extent_buffer(ctx.scratch_eb);
1776 	ASSERT(list_empty(&ctx.list));
1777 	ASSERT(list_empty(&ctx.conflict_inodes));
1778 	err = file_check_and_advance_wb_err(file);
1779 	if (!ret)
1780 		ret = err;
1781 	return ret > 0 ? -EIO : ret;
1782 
1783 out_release_extents:
1784 	btrfs_release_log_ctx_extents(&ctx);
1785 	if (skip_ilock)
1786 		up_write(&inode->i_mmap_lock);
1787 	else
1788 		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
1789 	goto out;
1790 }
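
/*
 * Editorial sketch (not part of the original source): the fsync path that
 * ends above is driven from userspace by a plain fsync(2)/fdatasync(2)
 * call.  A minimal example of a caller:
 *
 *	#include <unistd.h>
 *
 *	// Persist previously written data; on btrfs this ends up in the
 *	// function above, which either syncs the log tree or falls back
 *	// to a full transaction commit.
 *	int persist(int fd)
 *	{
 *		return fsync(fd);
 *	}
 *
 * A return of 0 means both the data and the metadata needed to find it are
 * durable; errors recorded by writeback are surfaced via the wb_err checks
 * above.
 */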
1791 
1792 /*
1793  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
1794  * called from a page fault handler when a page is first dirtied. Hence we must
1795  * be careful to check for EOF conditions here. We set the page up correctly
1796  * for a written page which means we get ENOSPC checking when writing into
1797  * holes and correct delalloc and unwritten extent mapping on filesystems that
1798  * support these features.
1799  *
1800  * We are not allowed to take the i_mutex here so we have to play games to
1801  * protect against truncate races as the page could now be beyond EOF.  Because
1802  * truncate_setsize() writes the inode size before removing pages, once we have
1803  * the page lock we can determine safely if the page is beyond EOF. If it is not
1804  * beyond EOF, then the page is guaranteed safe against truncation until we
1805  * unlock the page.
1806  */
1807 static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
1808 {
1809 	struct page *page = vmf->page;
1810 	struct folio *folio = page_folio(page);
1811 	struct inode *inode = file_inode(vmf->vma->vm_file);
1812 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1813 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1814 	struct btrfs_ordered_extent *ordered;
1815 	struct extent_state *cached_state = NULL;
1816 	struct extent_changeset *data_reserved = NULL;
1817 	unsigned long zero_start;
1818 	loff_t size;
1819 	vm_fault_t ret;
1820 	int ret2;
1821 	int reserved = 0;
1822 	u64 reserved_space;
1823 	u64 page_start;
1824 	u64 page_end;
1825 	u64 end;
1826 
1827 	ASSERT(folio_order(folio) == 0);
1828 
1829 	reserved_space = PAGE_SIZE;
1830 
1831 	sb_start_pagefault(inode->i_sb);
1832 	page_start = folio_pos(folio);
1833 	page_end = page_start + folio_size(folio) - 1;
1834 	end = page_end;
1835 
1836 	/*
1837 	 * Reserving delalloc space after obtaining the page lock can lead to
1838 	 * deadlock. For example, if a dirty page is locked by this function
1839 	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
1840 	 * dirty page write out, then the btrfs_writepages() function could
1841 	 * end up waiting indefinitely to get a lock on the page currently
1842 	 * being processed by btrfs_page_mkwrite() function.
1843 	 */
1844 	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
1845 					    page_start, reserved_space);
1846 	if (!ret2) {
1847 		ret2 = file_update_time(vmf->vma->vm_file);
1848 		reserved = 1;
1849 	}
1850 	if (ret2) {
1851 		ret = vmf_error(ret2);
1852 		if (reserved)
1853 			goto out;
1854 		goto out_noreserve;
1855 	}
1856 
1857 	/* Make the VM retry the fault. */
1858 	ret = VM_FAULT_NOPAGE;
1859 again:
1860 	down_read(&BTRFS_I(inode)->i_mmap_lock);
1861 	folio_lock(folio);
1862 	size = i_size_read(inode);
1863 
1864 	if ((folio->mapping != inode->i_mapping) ||
1865 	    (page_start >= size)) {
1866 		/* Page got truncated out from underneath us. */
1867 		goto out_unlock;
1868 	}
1869 	folio_wait_writeback(folio);
1870 
1871 	lock_extent(io_tree, page_start, page_end, &cached_state);
1872 	ret2 = set_folio_extent_mapped(folio);
1873 	if (ret2 < 0) {
1874 		ret = vmf_error(ret2);
1875 		unlock_extent(io_tree, page_start, page_end, &cached_state);
1876 		goto out_unlock;
1877 	}
1878 
1879 	/*
1880 	 * We can't set the delalloc bits if there are pending ordered
1881 	 * extents.  Drop our locks and wait for them to finish.
1882 	 */
1883 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
1884 	if (ordered) {
1885 		unlock_extent(io_tree, page_start, page_end, &cached_state);
1886 		folio_unlock(folio);
1887 		up_read(&BTRFS_I(inode)->i_mmap_lock);
1888 		btrfs_start_ordered_extent(ordered);
1889 		btrfs_put_ordered_extent(ordered);
1890 		goto again;
1891 	}
1892 
1893 	if (folio->index == ((size - 1) >> PAGE_SHIFT)) {
1894 		reserved_space = round_up(size - page_start, fs_info->sectorsize);
1895 		if (reserved_space < PAGE_SIZE) {
1896 			end = page_start + reserved_space - 1;
1897 			btrfs_delalloc_release_space(BTRFS_I(inode),
1898 					data_reserved, page_start,
1899 					PAGE_SIZE - reserved_space, true);
1900 		}
1901 	}
1902 
1903 	/*
1904 	 * page_mkwrite gets called when the page is first dirtied after it's
1905 	 * faulted in, but write(2) could also dirty a page and set delalloc
1906 	 * bits, so in this case, for space accounting reasons, we still need to
1907 	 * clear any delalloc bits within this page range since we have to
1908 	 * reserve data&meta space before lock_page() (see above comments).
1909 	 */
1910 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
1911 			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
1912 			  EXTENT_DEFRAG, &cached_state);
1913 
1914 	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
1915 					&cached_state);
1916 	if (ret2) {
1917 		unlock_extent(io_tree, page_start, page_end, &cached_state);
1918 		ret = VM_FAULT_SIGBUS;
1919 		goto out_unlock;
1920 	}
1921 
1922 	/* Page is wholly or partially inside EOF. */
1923 	if (page_start + folio_size(folio) > size)
1924 		zero_start = offset_in_folio(folio, size);
1925 	else
1926 		zero_start = PAGE_SIZE;
1927 
1928 	if (zero_start != PAGE_SIZE)
1929 		folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
1930 
1931 	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
1932 	btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
1933 	btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
1934 
1935 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
1936 
1937 	unlock_extent(io_tree, page_start, page_end, &cached_state);
1938 	up_read(&BTRFS_I(inode)->i_mmap_lock);
1939 
1940 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
1941 	sb_end_pagefault(inode->i_sb);
1942 	extent_changeset_free(data_reserved);
1943 	return VM_FAULT_LOCKED;
1944 
1945 out_unlock:
1946 	folio_unlock(folio);
1947 	up_read(&BTRFS_I(inode)->i_mmap_lock);
1948 out:
1949 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
1950 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
1951 				     reserved_space, (ret != 0));
1952 out_noreserve:
1953 	sb_end_pagefault(inode->i_sb);
1954 	extent_changeset_free(data_reserved);
1955 	return ret;
1956 }
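
/*
 * Editorial sketch (not part of the original source): btrfs_page_mkwrite()
 * runs on the first write to a clean page of a shared file mapping.  A
 * minimal userspace trigger (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	void dirty_one_byte(const char *path)
 *	{
 *		int fd = open(path, O_RDWR);
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *
 *		p[0] = 'x';	// write fault -> ->page_mkwrite()
 *		munmap(p, 4096);
 *		close(fd);
 *	}
 *
 * The VM_FAULT_NOPAGE default plus the "goto again" above exist because
 * pending ordered extents may force the handler to drop its locks and retry.
 */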
1957 
1958 static const struct vm_operations_struct btrfs_file_vm_ops = {
1959 	.fault		= filemap_fault,
1960 	.map_pages	= filemap_map_pages,
1961 	.page_mkwrite	= btrfs_page_mkwrite,
1962 };
1963 
1964 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1965 {
1966 	struct address_space *mapping = filp->f_mapping;
1967 
1968 	if (!mapping->a_ops->read_folio)
1969 		return -ENOEXEC;
1970 
1971 	file_accessed(filp);
1972 	vma->vm_ops = &btrfs_file_vm_ops;
1973 
1974 	return 0;
1975 }
1976 
1977 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
1978 			  int slot, u64 start, u64 end)
1979 {
1980 	struct btrfs_file_extent_item *fi;
1981 	struct btrfs_key key;
1982 
1983 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1984 		return 0;
1985 
1986 	btrfs_item_key_to_cpu(leaf, &key, slot);
1987 	if (key.objectid != btrfs_ino(inode) ||
1988 	    key.type != BTRFS_EXTENT_DATA_KEY)
1989 		return 0;
1990 
1991 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1992 
1993 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1994 		return 0;
1995 
1996 	if (btrfs_file_extent_disk_bytenr(leaf, fi))
1997 		return 0;
1998 
1999 	if (key.offset == end)
2000 		return 1;
2001 	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2002 		return 1;
2003 	return 0;
2004 }
2005 
2006 static int fill_holes(struct btrfs_trans_handle *trans,
2007 		struct btrfs_inode *inode,
2008 		struct btrfs_path *path, u64 offset, u64 end)
2009 {
2010 	struct btrfs_fs_info *fs_info = trans->fs_info;
2011 	struct btrfs_root *root = inode->root;
2012 	struct extent_buffer *leaf;
2013 	struct btrfs_file_extent_item *fi;
2014 	struct extent_map *hole_em;
2015 	struct btrfs_key key;
2016 	int ret;
2017 
2018 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2019 		goto out;
2020 
2021 	key.objectid = btrfs_ino(inode);
2022 	key.type = BTRFS_EXTENT_DATA_KEY;
2023 	key.offset = offset;
2024 
2025 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2026 	if (ret <= 0) {
2027 		/*
2028 		 * We should have dropped this offset, so if we find it then
2029 		 * something has gone horribly wrong.
2030 		 */
2031 		if (ret == 0)
2032 			ret = -EINVAL;
2033 		return ret;
2034 	}
2035 
2036 	leaf = path->nodes[0];
2037 	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2038 		u64 num_bytes;
2039 
2040 		path->slots[0]--;
2041 		fi = btrfs_item_ptr(leaf, path->slots[0],
2042 				    struct btrfs_file_extent_item);
2043 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2044 			end - offset;
2045 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2046 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2047 		btrfs_set_file_extent_offset(leaf, fi, 0);
2048 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2049 		btrfs_mark_buffer_dirty(trans, leaf);
2050 		goto out;
2051 	}
2052 
2053 	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2054 		u64 num_bytes;
2055 
2056 		key.offset = offset;
2057 		btrfs_set_item_key_safe(trans, path, &key);
2058 		fi = btrfs_item_ptr(leaf, path->slots[0],
2059 				    struct btrfs_file_extent_item);
2060 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2061 			offset;
2062 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2063 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2064 		btrfs_set_file_extent_offset(leaf, fi, 0);
2065 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2066 		btrfs_mark_buffer_dirty(trans, leaf);
2067 		goto out;
2068 	}
2069 	btrfs_release_path(path);
2070 
2071 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2072 				       end - offset);
2073 	if (ret)
2074 		return ret;
2075 
2076 out:
2077 	btrfs_release_path(path);
2078 
2079 	hole_em = alloc_extent_map();
2080 	if (!hole_em) {
2081 		btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2082 		btrfs_set_inode_full_sync(inode);
2083 	} else {
2084 		hole_em->start = offset;
2085 		hole_em->len = end - offset;
2086 		hole_em->ram_bytes = hole_em->len;
2087 
2088 		hole_em->disk_bytenr = EXTENT_MAP_HOLE;
2089 		hole_em->disk_num_bytes = 0;
2090 		hole_em->generation = trans->transid;
2091 
2092 		ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2093 		free_extent_map(hole_em);
2094 		if (ret)
2095 			btrfs_set_inode_full_sync(inode);
2096 	}
2097 
2098 	return 0;
2099 }
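
/*
 * Editorial note (not part of the original source): without the NO_HOLES
 * incompat feature, a hole must be represented by an explicit file extent
 * item with a zero disk_bytenr, keyed as:
 *
 *	key.objectid = btrfs_ino(inode);
 *	key.type     = BTRFS_EXTENT_DATA_KEY;
 *	key.offset   = hole_start;
 *
 * fill_holes() above first tries to extend an adjacent hole item via
 * hole_mergeable() and only inserts a fresh one when merging is not
 * possible.  With NO_HOLES, gaps between file extent items are implicit
 * holes and only the in-memory extent map needs updating.
 */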
2100 
2101 /*
2102  * Find a hole extent on the given inode and change start/len to the end of
2103  * the hole extent (a hole/vacuum extent whose em->start <= start &&
2104  *	   em->start + em->len > start).
2105  * When a hole extent is found, return 1 and modify start/len.
2106  */
2107 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2108 {
2109 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2110 	struct extent_map *em;
2111 	int ret = 0;
2112 
2113 	em = btrfs_get_extent(inode, NULL,
2114 			      round_down(*start, fs_info->sectorsize),
2115 			      round_up(*len, fs_info->sectorsize));
2116 	if (IS_ERR(em))
2117 		return PTR_ERR(em);
2118 
2119 	/* Hole or vacuum extent (only exists in no-holes mode) */
2120 	if (em->disk_bytenr == EXTENT_MAP_HOLE) {
2121 		ret = 1;
2122 		*len = em->start + em->len > *start + *len ?
2123 		       0 : *start + *len - em->start - em->len;
2124 		*start = em->start + em->len;
2125 	}
2126 	free_extent_map(em);
2127 	return ret;
2128 }
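
/*
 * Editorial note (not part of the original source): a worked example of
 * find_first_non_hole(), assuming a 4K sector size.  With *start = 4096,
 * *len = 8192 and a hole extent map covering [0, 16384):
 *
 *	em->start + em->len = 16384 > *start + *len = 12288
 *		=> *len   = 0
 *		=> *start = 16384
 *
 * The whole requested range sits inside the hole, so the function returns 1
 * with *len == 0 and the caller can skip the range entirely.
 */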
2129 
2130 static void btrfs_punch_hole_lock_range(struct inode *inode,
2131 					const u64 lockstart,
2132 					const u64 lockend,
2133 					struct extent_state **cached_state)
2134 {
2135 	/*
2136 	 * For subpage case, if the range is not at page boundary, we could
2137 	 * have pages at the leading/trailing part of the range.
2138 	 * This could lead to an infinite loop since filemap_range_has_page()
2139 	 * will always return true.
2140 	 * So here we need to do extra page alignment for
2141 	 * filemap_range_has_page().
2142 	 */
2143 	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2144 	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2145 
2146 	while (1) {
2147 		truncate_pagecache_range(inode, lockstart, lockend);
2148 
2149 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2150 			    cached_state);
2151 		/*
2152 		 * We can't have ordered extents in the range, nor dirty/writeback
2153 		 * pages, because we have locked the inode's VFS lock in exclusive
2154 		 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2155 		 * we have flushed all delalloc in the range and we have waited
2156 		 * for any ordered extents in the range to complete.
2157 		 * We can race with anyone reading pages from this range, so after
2158 		 * locking the range check if we have pages in the range, and if
2159 		 * we do, unlock the range and retry.
2160 		 */
2161 		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2162 					    page_lockend))
2163 			break;
2164 
2165 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2166 			      cached_state);
2167 	}
2168 
2169 	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2170 }
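
/*
 * Editorial note (not part of the original source): an example of the page
 * alignment above on a subpage setup (64K pages, 4K sectors).  For
 * lockstart = 4096 and lockend = 131071:
 *
 *	page_lockstart = round_up(4096, 65536)         = 65536
 *	page_lockend   = round_down(131072, 65536) - 1 = 131071
 *
 * The partially covered head page [0, 65535] is excluded from the
 * filemap_range_has_page() check: truncate_pagecache_range() cannot remove
 * it (it still holds data outside the punched range), so including it would
 * make the loop above spin forever.
 */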
2171 
2172 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2173 				     struct btrfs_inode *inode,
2174 				     struct btrfs_path *path,
2175 				     struct btrfs_replace_extent_info *extent_info,
2176 				     const u64 replace_len,
2177 				     const u64 bytes_to_drop)
2178 {
2179 	struct btrfs_fs_info *fs_info = trans->fs_info;
2180 	struct btrfs_root *root = inode->root;
2181 	struct btrfs_file_extent_item *extent;
2182 	struct extent_buffer *leaf;
2183 	struct btrfs_key key;
2184 	int slot;
2185 	int ret;
2186 
2187 	if (replace_len == 0)
2188 		return 0;
2189 
2190 	if (extent_info->disk_offset == 0 &&
2191 	    btrfs_fs_incompat(fs_info, NO_HOLES)) {
2192 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2193 		return 0;
2194 	}
2195 
2196 	key.objectid = btrfs_ino(inode);
2197 	key.type = BTRFS_EXTENT_DATA_KEY;
2198 	key.offset = extent_info->file_offset;
2199 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2200 				      sizeof(struct btrfs_file_extent_item));
2201 	if (ret)
2202 		return ret;
2203 	leaf = path->nodes[0];
2204 	slot = path->slots[0];
2205 	write_extent_buffer(leaf, extent_info->extent_buf,
2206 			    btrfs_item_ptr_offset(leaf, slot),
2207 			    sizeof(struct btrfs_file_extent_item));
2208 	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2209 	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2210 	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2211 	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2212 	if (extent_info->is_new_extent)
2213 		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2214 	btrfs_mark_buffer_dirty(trans, leaf);
2215 	btrfs_release_path(path);
2216 
2217 	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2218 						replace_len);
2219 	if (ret)
2220 		return ret;
2221 
2222 	/* If it's a hole, nothing more needs to be done. */
2223 	if (extent_info->disk_offset == 0) {
2224 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2225 		return 0;
2226 	}
2227 
2228 	btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2229 
2230 	if (extent_info->is_new_extent && extent_info->insertions == 0) {
2231 		key.objectid = extent_info->disk_offset;
2232 		key.type = BTRFS_EXTENT_ITEM_KEY;
2233 		key.offset = extent_info->disk_len;
2234 		ret = btrfs_alloc_reserved_file_extent(trans, root,
2235 						       btrfs_ino(inode),
2236 						       extent_info->file_offset,
2237 						       extent_info->qgroup_reserved,
2238 						       &key);
2239 	} else {
2240 		struct btrfs_ref ref = {
2241 			.action = BTRFS_ADD_DELAYED_REF,
2242 			.bytenr = extent_info->disk_offset,
2243 			.num_bytes = extent_info->disk_len,
2244 			.owning_root = btrfs_root_id(root),
2245 			.ref_root = btrfs_root_id(root),
2246 		};
2247 		u64 ref_offset;
2248 
2249 		ref_offset = extent_info->file_offset - extent_info->data_offset;
2250 		btrfs_init_data_ref(&ref, btrfs_ino(inode), ref_offset, 0, false);
2251 		ret = btrfs_inc_extent_ref(trans, &ref);
2252 	}
2253 
2254 	extent_info->insertions++;
2255 
2256 	return ret;
2257 }
2258 
2259 /*
2260  * The respective range must have been previously locked, as well as the inode.
2261  * The end offset is inclusive (last byte of the range).
2262  * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2263  * the file range with an extent.
2264  * When not punching a hole, we don't want to end up in a state where we dropped
2265  * extents without inserting a new one, so we must abort the transaction to avoid
2266  * a corruption.
2267  */
2268 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2269 			       struct btrfs_path *path, const u64 start,
2270 			       const u64 end,
2271 			       struct btrfs_replace_extent_info *extent_info,
2272 			       struct btrfs_trans_handle **trans_out)
2273 {
2274 	struct btrfs_drop_extents_args drop_args = { 0 };
2275 	struct btrfs_root *root = inode->root;
2276 	struct btrfs_fs_info *fs_info = root->fs_info;
2277 	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2278 	u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2279 	struct btrfs_trans_handle *trans = NULL;
2280 	struct btrfs_block_rsv *rsv;
2281 	unsigned int rsv_count;
2282 	u64 cur_offset;
2283 	u64 len = end - start;
2284 	int ret = 0;
2285 
2286 	if (end <= start)
2287 		return -EINVAL;
2288 
2289 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2290 	if (!rsv) {
2291 		ret = -ENOMEM;
2292 		goto out;
2293 	}
2294 	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2295 	rsv->failfast = true;
2296 
2297 	/*
2298 	 * 1 - update the inode
2299 	 * 1 - removing the extents in the range
2300 	 * 1 - adding the hole extent if no_holes isn't set or if we are
2301 	 *     replacing the range with a new extent
2302 	 */
2303 	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2304 		rsv_count = 3;
2305 	else
2306 		rsv_count = 2;
2307 
2308 	trans = btrfs_start_transaction(root, rsv_count);
2309 	if (IS_ERR(trans)) {
2310 		ret = PTR_ERR(trans);
2311 		trans = NULL;
2312 		goto out_free;
2313 	}
2314 
2315 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2316 				      min_size, false);
2317 	if (WARN_ON(ret))
2318 		goto out_trans;
2319 	trans->block_rsv = rsv;
2320 
2321 	cur_offset = start;
2322 	drop_args.path = path;
2323 	drop_args.end = end + 1;
2324 	drop_args.drop_cache = true;
2325 	while (cur_offset < end) {
2326 		drop_args.start = cur_offset;
2327 		ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2328 		/* If we are punching a hole decrement the inode's byte count */
2329 		if (!extent_info)
2330 			btrfs_update_inode_bytes(inode, 0,
2331 						 drop_args.bytes_found);
2332 		if (ret != -ENOSPC) {
2333 			/*
2334 			 * The only time we don't want to abort is if we are
2335 			 * attempting to clone a partial inline extent, in which
2336 			 * case we'll get EOPNOTSUPP.  However if we aren't
2337 			 * cloning we need to abort no matter what, because if we
2338 			 * got EOPNOTSUPP via prealloc then we messed up and
2339 			 * need to abort.
2340 			 */
2341 			if (ret &&
2342 			    (ret != -EOPNOTSUPP ||
2343 			     (extent_info && extent_info->is_new_extent)))
2344 				btrfs_abort_transaction(trans, ret);
2345 			break;
2346 		}
2347 
2348 		trans->block_rsv = &fs_info->trans_block_rsv;
2349 
2350 		if (!extent_info && cur_offset < drop_args.drop_end &&
2351 		    cur_offset < ino_size) {
2352 			ret = fill_holes(trans, inode, path, cur_offset,
2353 					 drop_args.drop_end);
2354 			if (ret) {
2355 				/*
2356 				 * If we failed then we didn't insert our hole
2357 				 * entries for the area we dropped, so now the
2358 				 * fs is corrupted and we must abort the
2359 				 * transaction.
2360 				 */
2361 				btrfs_abort_transaction(trans, ret);
2362 				break;
2363 			}
2364 		} else if (!extent_info && cur_offset < drop_args.drop_end) {
2365 			/*
2366 			 * We are past the i_size here, but since we didn't
2367 			 * insert holes we need to clear the mapped area so we
2368 			 * know to not set disk_i_size in this area until a new
2369 			 * file extent is inserted here.
2370 			 */
2371 			ret = btrfs_inode_clear_file_extent_range(inode,
2372 					cur_offset,
2373 					drop_args.drop_end - cur_offset);
2374 			if (ret) {
2375 				/*
2376 				 * We couldn't clear our area, so we could
2377 				 * presumably adjust up and corrupt the fs, so
2378 				 * we need to abort.
2379 				 */
2380 				btrfs_abort_transaction(trans, ret);
2381 				break;
2382 			}
2383 		}
2384 
2385 		if (extent_info &&
2386 		    drop_args.drop_end > extent_info->file_offset) {
2387 			u64 replace_len = drop_args.drop_end -
2388 					  extent_info->file_offset;
2389 
2390 			ret = btrfs_insert_replace_extent(trans, inode,	path,
2391 					extent_info, replace_len,
2392 					drop_args.bytes_found);
2393 			if (ret) {
2394 				btrfs_abort_transaction(trans, ret);
2395 				break;
2396 			}
2397 			extent_info->data_len -= replace_len;
2398 			extent_info->data_offset += replace_len;
2399 			extent_info->file_offset += replace_len;
2400 		}
2401 
2402 		/*
2403 		 * We are releasing our handle on the transaction, balance the
2404 		 * dirty pages of the btree inode and flush delayed items, and
2405 		 * then get a new transaction handle, which may now point to a
2406 		 * new transaction in case someone else may have committed the
2407 		 * transaction we used to replace/drop file extent items. So
2408 		 * bump the inode's iversion and update mtime and ctime except
2409 		 * if we are called from a dedupe context. This is because a
2410 		 * power failure/crash may happen after the transaction is
2411 		 * committed and before we finish replacing/dropping all the
2412 		 * file extent items we need.
2413 		 */
2414 		inode_inc_iversion(&inode->vfs_inode);
2415 
2416 		if (!extent_info || extent_info->update_times)
2417 			inode_set_mtime_to_ts(&inode->vfs_inode,
2418 					      inode_set_ctime_current(&inode->vfs_inode));
2419 
2420 		ret = btrfs_update_inode(trans, inode);
2421 		if (ret)
2422 			break;
2423 
2424 		btrfs_end_transaction(trans);
2425 		btrfs_btree_balance_dirty(fs_info);
2426 
2427 		trans = btrfs_start_transaction(root, rsv_count);
2428 		if (IS_ERR(trans)) {
2429 			ret = PTR_ERR(trans);
2430 			trans = NULL;
2431 			break;
2432 		}
2433 
2434 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2435 					      rsv, min_size, false);
2436 		if (WARN_ON(ret))
2437 			break;
2438 		trans->block_rsv = rsv;
2439 
2440 		cur_offset = drop_args.drop_end;
2441 		len = end - cur_offset;
2442 		if (!extent_info && len) {
2443 			ret = find_first_non_hole(inode, &cur_offset, &len);
2444 			if (unlikely(ret < 0))
2445 				break;
2446 			if (ret && !len) {
2447 				ret = 0;
2448 				break;
2449 			}
2450 		}
2451 	}
2452 
2453 	/*
2454 	 * If we were cloning, force the next fsync to be a full one since we
2455  * replaced (or just dropped in the case of cloning holes when
2456 	 * NO_HOLES is enabled) file extent items and did not setup new extent
2457 	 * maps for the replacement extents (or holes).
2458 	 */
2459 	if (extent_info && !extent_info->is_new_extent)
2460 		btrfs_set_inode_full_sync(inode);
2461 
2462 	if (ret)
2463 		goto out_trans;
2464 
2465 	trans->block_rsv = &fs_info->trans_block_rsv;
2466 	/*
2467 	 * If we are using the NO_HOLES feature we might already have had a
2468 	 * hole that overlaps part of the region [lockstart, lockend] and
2469 	 * ends at (or beyond) lockend. Since we have no file extent items to
2470 	 * represent holes, drop_end can be less than lockend and so we must
2471 	 * make sure we have an extent map representing the existing hole (the
2472 	 * call to __btrfs_drop_extents() might have dropped the existing extent
2473 	 * map representing the existing hole), otherwise the fast fsync path
2474 	 * will not record the existence of the hole region
2475 	 * [existing_hole_start, lockend].
2476 	 */
2477 	if (drop_args.drop_end <= end)
2478 		drop_args.drop_end = end + 1;
2479 	/*
2480 	 * Don't insert file hole extent item if it's for a range beyond eof
2481 	 * (because it's useless) or if it represents a 0 bytes range (when
2482 	 * cur_offset == drop_end).
2483 	 */
2484 	if (!extent_info && cur_offset < ino_size &&
2485 	    cur_offset < drop_args.drop_end) {
2486 		ret = fill_holes(trans, inode, path, cur_offset,
2487 				 drop_args.drop_end);
2488 		if (ret) {
2489 			/* Same comment as above. */
2490 			btrfs_abort_transaction(trans, ret);
2491 			goto out_trans;
2492 		}
2493 	} else if (!extent_info && cur_offset < drop_args.drop_end) {
2494 		/* See the comment in the loop above for the reasoning here. */
2495 		ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2496 					drop_args.drop_end - cur_offset);
2497 		if (ret) {
2498 			btrfs_abort_transaction(trans, ret);
2499 			goto out_trans;
2500 		}
2501 
2502 	}
2503 	if (extent_info) {
2504 		ret = btrfs_insert_replace_extent(trans, inode, path,
2505 				extent_info, extent_info->data_len,
2506 				drop_args.bytes_found);
2507 		if (ret) {
2508 			btrfs_abort_transaction(trans, ret);
2509 			goto out_trans;
2510 		}
2511 	}
2512 
2513 out_trans:
2514 	if (!trans)
2515 		goto out_free;
2516 
2517 	trans->block_rsv = &fs_info->trans_block_rsv;
2518 	if (ret)
2519 		btrfs_end_transaction(trans);
2520 	else
2521 		*trans_out = trans;
2522 out_free:
2523 	btrfs_free_block_rsv(fs_info, rsv);
2524 out:
2525 	return ret;
2526 }
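
/*
 * Editorial note (not part of the original source): a hedged sketch of the
 * two call shapes of the helper above:
 *
 *	// Hole punching (see btrfs_punch_hole() below): drop the extents
 *	// and, without NO_HOLES, insert explicit hole items.
 *	ret = btrfs_replace_file_extents(inode, path, start, end,
 *					 NULL, &trans);
 *
 *	// Clone/dedupe style replacement: drop the extents and insert the
 *	// replacement extent described by 'info'.
 *	ret = btrfs_replace_file_extents(inode, path, start, end,
 *					 &info, &trans);
 *
 * On success the still-open transaction is returned via @trans_out so the
 * caller can update the inode item within the same transaction.
 */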
2527 
2528 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2529 {
2530 	struct inode *inode = file_inode(file);
2531 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2532 	struct btrfs_root *root = BTRFS_I(inode)->root;
2533 	struct extent_state *cached_state = NULL;
2534 	struct btrfs_path *path;
2535 	struct btrfs_trans_handle *trans = NULL;
2536 	u64 lockstart;
2537 	u64 lockend;
2538 	u64 tail_start;
2539 	u64 tail_len;
2540 	u64 orig_start = offset;
2541 	int ret = 0;
2542 	bool same_block;
2543 	u64 ino_size;
2544 	bool truncated_block = false;
2545 	bool updated_inode = false;
2546 
2547 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2548 
2549 	ret = btrfs_wait_ordered_range(BTRFS_I(inode), offset, len);
2550 	if (ret)
2551 		goto out_only_mutex;
2552 
2553 	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2554 	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2555 	if (ret < 0)
2556 		goto out_only_mutex;
2557 	if (ret && !len) {
2558 		/* Already in a large hole */
2559 		ret = 0;
2560 		goto out_only_mutex;
2561 	}
2562 
2563 	ret = file_modified(file);
2564 	if (ret)
2565 		goto out_only_mutex;
2566 
2567 	lockstart = round_up(offset, fs_info->sectorsize);
2568 	lockend = round_down(offset + len, fs_info->sectorsize) - 1;
2569 	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2570 		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2571 	/*
2572 	 * We needn't truncate any block which is beyond the end of the file
2573 	 * because we are sure there is no data there.
2574 	 */
2575 	/*
2576 	 * Only do this if we are in the same block and we aren't doing the
2577 	 * entire block.
2578 	 */
2579 	if (same_block && len < fs_info->sectorsize) {
2580 		if (offset < ino_size) {
2581 			truncated_block = true;
2582 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2583 						   0);
2584 		} else {
2585 			ret = 0;
2586 		}
2587 		goto out_only_mutex;
2588 	}
2589 
2590 	/* zero back part of the first block */
2591 	if (offset < ino_size) {
2592 		truncated_block = true;
2593 		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2594 		if (ret) {
2595 			btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2596 			return ret;
2597 		}
2598 	}
2599 
2600 	/* Check the aligned pages after the first unaligned page;
2601 	 * if offset != orig_start, the first unaligned page and
2602 	 * several following pages are already in holes, so the
2603 	 * extra check can be skipped. */
2604 	if (offset == orig_start) {
2605 		/* After truncating the page, check for a hole again. */
2606 		len = offset + len - lockstart;
2607 		offset = lockstart;
2608 		ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2609 		if (ret < 0)
2610 			goto out_only_mutex;
2611 		if (ret && !len) {
2612 			ret = 0;
2613 			goto out_only_mutex;
2614 		}
2615 		lockstart = offset;
2616 	}
2617 
2618 	/* Check the tail unaligned part is in a hole */
2619 	tail_start = lockend + 1;
2620 	tail_len = offset + len - tail_start;
2621 	if (tail_len) {
2622 		ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2623 		if (unlikely(ret < 0))
2624 			goto out_only_mutex;
2625 		if (!ret) {
2626 			/* zero the front end of the last page */
2627 			if (tail_start + tail_len < ino_size) {
2628 				truncated_block = true;
2629 				ret = btrfs_truncate_block(BTRFS_I(inode),
2630 							tail_start + tail_len,
2631 							0, 1);
2632 				if (ret)
2633 					goto out_only_mutex;
2634 			}
2635 		}
2636 	}
2637 
2638 	if (lockend < lockstart) {
2639 		ret = 0;
2640 		goto out_only_mutex;
2641 	}
2642 
2643 	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2644 
2645 	path = btrfs_alloc_path();
2646 	if (!path) {
2647 		ret = -ENOMEM;
2648 		goto out;
2649 	}
2650 
2651 	ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2652 					 lockend, NULL, &trans);
2653 	btrfs_free_path(path);
2654 	if (ret)
2655 		goto out;
2656 
2657 	ASSERT(trans != NULL);
2658 	inode_inc_iversion(inode);
2659 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2660 	ret = btrfs_update_inode(trans, BTRFS_I(inode));
2661 	updated_inode = true;
2662 	btrfs_end_transaction(trans);
2663 	btrfs_btree_balance_dirty(fs_info);
2664 out:
2665 	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2666 		      &cached_state);
2667 out_only_mutex:
2668 	if (!updated_inode && truncated_block && !ret) {
2669 		/*
2670 		 * If we only end up zeroing part of a page, we still need to
2671 		 * update the inode item, so that all the time fields are
2672 		 * updated as well as the necessary btrfs inode in memory fields
2673 		 * for detecting, at fsync time, if the inode isn't yet in the
2674 		 * log tree or it's there but not up to date.
2675 		 */
2676 		struct timespec64 now = inode_set_ctime_current(inode);
2677 
2678 		inode_inc_iversion(inode);
2679 		inode_set_mtime_to_ts(inode, now);
2680 		trans = btrfs_start_transaction(root, 1);
2681 		if (IS_ERR(trans)) {
2682 			ret = PTR_ERR(trans);
2683 		} else {
2684 			int ret2;
2685 
2686 			ret = btrfs_update_inode(trans, BTRFS_I(inode));
2687 			ret2 = btrfs_end_transaction(trans);
2688 			if (!ret)
2689 				ret = ret2;
2690 		}
2691 	}
2692 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2693 	return ret;
2694 }
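
/*
 * Editorial sketch (not part of the original source): btrfs_punch_hole()
 * backs the punch-hole mode of fallocate(2).  From userspace:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// Deallocate [offset, offset + len) without changing i_size; the
 *	// VFS requires PUNCH_HOLE to be combined with KEEP_SIZE.
 *	int punch(int fd, off_t offset, off_t len)
 *	{
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *				     FALLOC_FL_KEEP_SIZE, offset, len);
 *	}
 *
 * Unaligned head/tail parts are zeroed in place via btrfs_truncate_block()
 * above; only fully covered blocks become holes.
 */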
2695 
2696 /* Helper structure to record which range is already reserved */
2697 struct falloc_range {
2698 	struct list_head list;
2699 	u64 start;
2700 	u64 len;
2701 };
2702 
2703 /*
2704  * Helper function to add falloc range
2705  *
2706  * Caller should have locked the larger range of extent containing
2707  * [start, len)
2708  */
2709 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2710 {
2711 	struct falloc_range *range = NULL;
2712 
2713 	if (!list_empty(head)) {
2714 		/*
2715 		 * As fallocate iterates in increasing file offset order, we
2716 		 * only need to check the last range.
2717 		 */
2718 		range = list_last_entry(head, struct falloc_range, list);
2719 		if (range->start + range->len == start) {
2720 			range->len += len;
2721 			return 0;
2722 		}
2723 	}
2724 
2725 	range = kmalloc(sizeof(*range), GFP_KERNEL);
2726 	if (!range)
2727 		return -ENOMEM;
2728 	range->start = start;
2729 	range->len = len;
2730 	list_add_tail(&range->list, head);
2731 	return 0;
2732 }
2733 
2734 static int btrfs_fallocate_update_isize(struct inode *inode,
2735 					const u64 end,
2736 					const int mode)
2737 {
2738 	struct btrfs_trans_handle *trans;
2739 	struct btrfs_root *root = BTRFS_I(inode)->root;
2740 	int ret;
2741 	int ret2;
2742 
2743 	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2744 		return 0;
2745 
2746 	trans = btrfs_start_transaction(root, 1);
2747 	if (IS_ERR(trans))
2748 		return PTR_ERR(trans);
2749 
2750 	inode_set_ctime_current(inode);
2751 	i_size_write(inode, end);
2752 	btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
2753 	ret = btrfs_update_inode(trans, BTRFS_I(inode));
2754 	ret2 = btrfs_end_transaction(trans);
2755 
2756 	return ret ? ret : ret2;
2757 }
2758 
2759 enum {
2760 	RANGE_BOUNDARY_WRITTEN_EXTENT,
2761 	RANGE_BOUNDARY_PREALLOC_EXTENT,
2762 	RANGE_BOUNDARY_HOLE,
2763 };
2764 
2765 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
2766 						 u64 offset)
2767 {
2768 	const u64 sectorsize = inode->root->fs_info->sectorsize;
2769 	struct extent_map *em;
2770 	int ret;
2771 
2772 	offset = round_down(offset, sectorsize);
2773 	em = btrfs_get_extent(inode, NULL, offset, sectorsize);
2774 	if (IS_ERR(em))
2775 		return PTR_ERR(em);
2776 
2777 	if (em->disk_bytenr == EXTENT_MAP_HOLE)
2778 		ret = RANGE_BOUNDARY_HOLE;
2779 	else if (em->flags & EXTENT_FLAG_PREALLOC)
2780 		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2781 	else
2782 		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2783 
2784 	free_extent_map(em);
2785 	return ret;
2786 }
2787 
2788 static int btrfs_zero_range(struct inode *inode,
2789 			    loff_t offset,
2790 			    loff_t len,
2791 			    const int mode)
2792 {
2793 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2794 	struct extent_map *em;
2795 	struct extent_changeset *data_reserved = NULL;
2796 	int ret;
2797 	u64 alloc_hint = 0;
2798 	const u64 sectorsize = fs_info->sectorsize;
2799 	u64 alloc_start = round_down(offset, sectorsize);
2800 	u64 alloc_end = round_up(offset + len, sectorsize);
2801 	u64 bytes_to_reserve = 0;
2802 	bool space_reserved = false;
2803 
2804 	em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start,
2805 			      alloc_end - alloc_start);
2806 	if (IS_ERR(em)) {
2807 		ret = PTR_ERR(em);
2808 		goto out;
2809 	}
2810 
2811 	/*
2812 	 * Avoid hole punching and extent allocation for some cases. More cases
2813 	 * could be considered, but these are unlikely to be common and we keep
2814 	 * things as simple as possible for now. Also, intentionally, if the target
2815 	 * range contains one or more prealloc extents together with regular
2816 	 * extents and holes, we drop all the existing extents and allocate a
2817 	 * new prealloc extent, so that we get a larger contiguous disk extent.
2818 	 */
2819 	if (em->start <= alloc_start && (em->flags & EXTENT_FLAG_PREALLOC)) {
2820 		const u64 em_end = em->start + em->len;
2821 
2822 		if (em_end >= offset + len) {
2823 			/*
2824 			 * The whole range is already a prealloc extent,
2825 			 * do nothing except updating the inode's i_size if
2826 			 * needed.
2827 			 */
2828 			free_extent_map(em);
2829 			ret = btrfs_fallocate_update_isize(inode, offset + len,
2830 							   mode);
2831 			goto out;
2832 		}
2833 		/*
2834 		 * Part of the range is already a prealloc extent, so operate
2835 		 * only on the remaining part of the range.
2836 		 */
2837 		alloc_start = em_end;
2838 		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2839 		len = offset + len - alloc_start;
2840 		offset = alloc_start;
2841 		alloc_hint = extent_map_block_start(em) + em->len;
2842 	}
2843 	free_extent_map(em);
2844 
2845 	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2846 	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2847 		em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start, sectorsize);
2848 		if (IS_ERR(em)) {
2849 			ret = PTR_ERR(em);
2850 			goto out;
2851 		}
2852 
2853 		if (em->flags & EXTENT_FLAG_PREALLOC) {
2854 			free_extent_map(em);
2855 			ret = btrfs_fallocate_update_isize(inode, offset + len,
2856 							   mode);
2857 			goto out;
2858 		}
2859 		if (len < sectorsize && em->disk_bytenr != EXTENT_MAP_HOLE) {
2860 			free_extent_map(em);
2861 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2862 						   0);
2863 			if (!ret)
2864 				ret = btrfs_fallocate_update_isize(inode,
2865 								   offset + len,
2866 								   mode);
2867 			return ret;
2868 		}
2869 		free_extent_map(em);
2870 		alloc_start = round_down(offset, sectorsize);
2871 		alloc_end = alloc_start + sectorsize;
2872 		goto reserve_space;
2873 	}
2874 
2875 	alloc_start = round_up(offset, sectorsize);
2876 	alloc_end = round_down(offset + len, sectorsize);
2877 
2878 	/*
2879 	 * For unaligned ranges, check the pages at the boundaries; they might
2880 	 * map to an extent, in which case we need to partially zero them, or
2881 	 * they might map to a hole, in which case we need our allocation range
2882 	 * to cover them.
2883 	 */
2884 	if (!IS_ALIGNED(offset, sectorsize)) {
2885 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2886 							    offset);
2887 		if (ret < 0)
2888 			goto out;
2889 		if (ret == RANGE_BOUNDARY_HOLE) {
2890 			alloc_start = round_down(offset, sectorsize);
2891 			ret = 0;
2892 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2893 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2894 			if (ret)
2895 				goto out;
2896 		} else {
2897 			ret = 0;
2898 		}
2899 	}
2900 
2901 	if (!IS_ALIGNED(offset + len, sectorsize)) {
2902 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2903 							    offset + len);
2904 		if (ret < 0)
2905 			goto out;
2906 		if (ret == RANGE_BOUNDARY_HOLE) {
2907 			alloc_end = round_up(offset + len, sectorsize);
2908 			ret = 0;
2909 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2910 			ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
2911 						   0, 1);
2912 			if (ret)
2913 				goto out;
2914 		} else {
2915 			ret = 0;
2916 		}
2917 	}
2918 
2919 reserve_space:
2920 	if (alloc_start < alloc_end) {
2921 		struct extent_state *cached_state = NULL;
2922 		const u64 lockstart = alloc_start;
2923 		const u64 lockend = alloc_end - 1;
2924 
2925 		bytes_to_reserve = alloc_end - alloc_start;
2926 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
2927 						      bytes_to_reserve);
2928 		if (ret < 0)
2929 			goto out;
2930 		space_reserved = true;
2931 		btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2932 					    &cached_state);
2933 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
2934 						alloc_start, bytes_to_reserve);
2935 		if (ret) {
2936 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
2937 				      lockend, &cached_state);
2938 			goto out;
2939 		}
2940 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
2941 						alloc_end - alloc_start,
2942 						fs_info->sectorsize,
2943 						offset + len, &alloc_hint);
2944 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2945 			      &cached_state);
2946 		/* btrfs_prealloc_file_range releases reserved space on error */
2947 		if (ret) {
2948 			space_reserved = false;
2949 			goto out;
2950 		}
2951 	}
2952 	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
2953  out:
2954 	if (ret && space_reserved)
2955 		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
2956 					       alloc_start, bytes_to_reserve);
2957 	extent_changeset_free(data_reserved);
2958 
2959 	return ret;
2960 }
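
/*
 * Editorial note (not part of the original source): a summary of the
 * boundary handling in btrfs_zero_range() for an unaligned start, assuming
 * 4K sectors and offset = 5000:
 *
 *	- boundary block is a hole     -> grow the allocation down to
 *	                                  round_down(5000, 4096) = 4096
 *	- boundary block is written    -> zero [5000, 8192) in the page cache
 *	                                  via btrfs_truncate_block()
 *	- boundary block is prealloc   -> nothing to do, reads of the
 *	                                  unwritten extent return zeroes
 *
 * The tail boundary at offset + len is handled symmetrically.
 */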
2961 
2962 static long btrfs_fallocate(struct file *file, int mode,
2963 			    loff_t offset, loff_t len)
2964 {
2965 	struct inode *inode = file_inode(file);
2966 	struct extent_state *cached_state = NULL;
2967 	struct extent_changeset *data_reserved = NULL;
2968 	struct falloc_range *range;
2969 	struct falloc_range *tmp;
2970 	LIST_HEAD(reserve_list);
2971 	u64 cur_offset;
2972 	u64 last_byte;
2973 	u64 alloc_start;
2974 	u64 alloc_end;
2975 	u64 alloc_hint = 0;
2976 	u64 locked_end;
2977 	u64 actual_end = 0;
2978 	u64 data_space_needed = 0;
2979 	u64 data_space_reserved = 0;
2980 	u64 qgroup_reserved = 0;
2981 	struct extent_map *em;
2982 	int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
2983 	int ret;
2984 
2985 	/* Do not allow fallocate in ZONED mode */
2986 	if (btrfs_is_zoned(inode_to_fs_info(inode)))
2987 		return -EOPNOTSUPP;
2988 
2989 	alloc_start = round_down(offset, blocksize);
2990 	alloc_end = round_up(offset + len, blocksize);
2991 	cur_offset = alloc_start;
2992 
2993 	/* Make sure we aren't being given some crap mode */
2994 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
2995 		     FALLOC_FL_ZERO_RANGE))
2996 		return -EOPNOTSUPP;
2997 
2998 	if (mode & FALLOC_FL_PUNCH_HOLE)
2999 		return btrfs_punch_hole(file, offset, len);
3000 
3001 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3002 
3003 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3004 		ret = inode_newsize_ok(inode, offset + len);
3005 		if (ret)
3006 			goto out;
3007 	}
3008 
3009 	ret = file_modified(file);
3010 	if (ret)
3011 		goto out;
3012 
3013 	/*
3014 	 * TODO: Move these two operations after we have checked
3015 	 * accurate reserved space, or fallocate can still fail but
3016 	 * with page truncated or size expanded.
3017 	 * with pages truncated or the size expanded.
3018 	 *
3019 	 * But that's a minor problem and won't do much harm.
3020 	if (alloc_start > inode->i_size) {
3021 		ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3022 					alloc_start);
3023 		if (ret)
3024 			goto out;
3025 	} else if (offset + len > inode->i_size) {
3026 		/*
3027 		 * If we are fallocating from the end of the file onward we
3028 		 * need to zero out the end of the block if i_size lands in the
3029 		 * middle of a block.
3030 		 */
3031 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3032 		if (ret)
3033 			goto out;
3034 	}
3035 
3036 	/*
3037 	 * We have locked the inode at the VFS level (in exclusive mode) and we
3038 	 * have locked the i_mmap_lock lock (in exclusive mode). Now before
3039 	 * locking the file range, flush all delalloc in the range and wait for
3040 	 * all ordered extents in the range to complete. After this we can lock
3041 	 * the file range and, due to the previous locking we did, we know there
3042 	 * can't be more delalloc or ordered extents in the range.
3043 	 */
3044 	ret = btrfs_wait_ordered_range(BTRFS_I(inode), alloc_start,
3045 				       alloc_end - alloc_start);
3046 	if (ret)
3047 		goto out;
3048 
3049 	if (mode & FALLOC_FL_ZERO_RANGE) {
3050 		ret = btrfs_zero_range(inode, offset, len, mode);
3051 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3052 		return ret;
3053 	}
3054 
3055 	locked_end = alloc_end - 1;
3056 	lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3057 		    &cached_state);
3058 
3059 	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3060 
3061 	/* First, check if we exceed the qgroup limit */
3062 	while (cur_offset < alloc_end) {
3063 		em = btrfs_get_extent(BTRFS_I(inode), NULL, cur_offset,
3064 				      alloc_end - cur_offset);
3065 		if (IS_ERR(em)) {
3066 			ret = PTR_ERR(em);
3067 			break;
3068 		}
3069 		last_byte = min(extent_map_end(em), alloc_end);
3070 		actual_end = min_t(u64, extent_map_end(em), offset + len);
3071 		last_byte = ALIGN(last_byte, blocksize);
3072 		if (em->disk_bytenr == EXTENT_MAP_HOLE ||
3073 		    (cur_offset >= inode->i_size &&
3074 		     !(em->flags & EXTENT_FLAG_PREALLOC))) {
3075 			const u64 range_len = last_byte - cur_offset;
3076 
3077 			ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3078 			if (ret < 0) {
3079 				free_extent_map(em);
3080 				break;
3081 			}
3082 			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3083 					&data_reserved, cur_offset, range_len);
3084 			if (ret < 0) {
3085 				free_extent_map(em);
3086 				break;
3087 			}
3088 			qgroup_reserved += range_len;
3089 			data_space_needed += range_len;
3090 		}
3091 		free_extent_map(em);
3092 		cur_offset = last_byte;
3093 	}
3094 
3095 	if (!ret && data_space_needed > 0) {
3096 		/*
3097 		 * We are safe to reserve space here as we can't have delalloc
3098 		 * in the range, see above.
3099 		 */
3100 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3101 						      data_space_needed);
3102 		if (!ret)
3103 			data_space_reserved = data_space_needed;
3104 	}
3105 
3106 	/*
3107 	 * If ret is still 0, we're OK to fallocate.
3108 	 * Otherwise just clean up the list and exit.
3109 	 */
3110 	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3111 		if (!ret) {
3112 			ret = btrfs_prealloc_file_range(inode, mode,
3113 					range->start,
3114 					range->len, blocksize,
3115 					offset + len, &alloc_hint);
3116 			/*
3117 			 * btrfs_prealloc_file_range() releases space even
3118 			 * if it returns an error.
3119 			 */
3120 			data_space_reserved -= range->len;
3121 			qgroup_reserved -= range->len;
3122 		} else if (data_space_reserved > 0) {
3123 			btrfs_free_reserved_data_space(BTRFS_I(inode),
3124 					       data_reserved, range->start,
3125 					       range->len);
3126 			data_space_reserved -= range->len;
3127 			qgroup_reserved -= range->len;
3128 		} else if (qgroup_reserved > 0) {
3129 			btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3130 					       range->start, range->len, NULL);
3131 			qgroup_reserved -= range->len;
3132 		}
3133 		list_del(&range->list);
3134 		kfree(range);
3135 	}
3136 	if (ret < 0)
3137 		goto out_unlock;
3138 
3139 	/*
3140 	 * We didn't need to allocate any more space, but we still extended the
3141 	 * size of the file so we need to update i_size and the inode item.
3142 	 */
3143 	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3144 out_unlock:
3145 	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3146 		      &cached_state);
3147 out:
3148 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3149 	extent_changeset_free(data_reserved);
3150 	return ret;
3151 }
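
/*
 * Editorial sketch (not part of the original source): the modes accepted by
 * btrfs_fallocate() above, from userspace (error handling omitted):
 *
 *	fallocate(fd, 0, 0, 1 << 20);                    // preallocate 1 MiB,
 *	                                                 // may grow i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);  // preallocate, keep
 *	                                                 // i_size unchanged
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 4096);    // zero first 4 KiB
 *
 * Any other mode bits are rejected with -EOPNOTSUPP, as is any fallocate
 * call on a zoned filesystem.
 */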
3152 
3153 /*
3154  * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3155  * that has unflushed and/or flushing delalloc. There might be other adjacent
3156  * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3157  * looping while it finds adjacent subranges, merging them together.
3158  */
3159 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3160 				   struct extent_state **cached_state,
3161 				   bool *search_io_tree,
3162 				   u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3163 {
3164 	u64 len = end + 1 - start;
3165 	u64 delalloc_len = 0;
3166 	struct btrfs_ordered_extent *oe;
3167 	u64 oe_start;
3168 	u64 oe_end;
3169 
3170 	/*
3171 	 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3172 	 * means we have delalloc (dirty pages) for which writeback has not
3173 	 * started yet.
3174 	 */
3175 	if (*search_io_tree) {
3176 		spin_lock(&inode->lock);
3177 		if (inode->delalloc_bytes > 0) {
3178 			spin_unlock(&inode->lock);
3179 			*delalloc_start_ret = start;
3180 			delalloc_len = count_range_bits(&inode->io_tree,
3181 							delalloc_start_ret, end,
3182 							len, EXTENT_DELALLOC, 1,
3183 							cached_state);
3184 		} else {
3185 			spin_unlock(&inode->lock);
3186 		}
3187 	}
3188 
3189 	if (delalloc_len > 0) {
3190 		/*
3191 		 * If delalloc was found then *delalloc_start_ret has a sector size
3192 		 * aligned value (rounded down).
3193 		 */
3194 		*delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3195 
3196 		if (*delalloc_start_ret == start) {
3197 			/* Delalloc for the whole range, nothing more to do. */
3198 			if (*delalloc_end_ret == end)
3199 				return true;
3200 			/* Else trim our search range for ordered extents. */
3201 			start = *delalloc_end_ret + 1;
3202 			len = end + 1 - start;
3203 		}
3204 	} else {
3205 		/* No delalloc, future calls don't need to search again. */
3206 		*search_io_tree = false;
3207 	}
3208 
3209 	/*
3210 	 * Now also check if there's any ordered extent in the range.
3211 	 * We do this because:
3212 	 *
3213 	 * 1) When delalloc is flushed, the file range is locked, we clear the
3214 	 *    EXTENT_DELALLOC bit from the io tree and create an extent map and
3215 	 *    an ordered extent for the write. So we might just have been called
3216 	 *    after delalloc is flushed and before the ordered extent completes
3217 	 *    and inserts the new file extent item in the subvolume's btree;
3218 	 *
3219 	 * 2) We may have an ordered extent created by flushing delalloc for a
3220 	 *    subrange that starts before the subrange we found marked with
3221 	 *    EXTENT_DELALLOC in the io tree.
3222 	 *
3223 	 * We could also use the extent map tree to find such delalloc that is
3224 	 * being flushed, but using the ordered extents tree is more efficient
3225 	 * because it's usually much smaller as ordered extents are removed from
3226 	 * the tree once they complete. With the extent maps, we may have them
3227 	 * in the extent map tree for a very long time, and they were either
3228 	 * created by previous writes or loaded by read operations.
3229 	 */
3230 	oe = btrfs_lookup_first_ordered_range(inode, start, len);
3231 	if (!oe)
3232 		return (delalloc_len > 0);
3233 
3234 	/* The ordered extent may span beyond our search range. */
3235 	oe_start = max(oe->file_offset, start);
3236 	oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
3237 
3238 	btrfs_put_ordered_extent(oe);
3239 
3240 	/* Don't have unflushed delalloc, return the ordered extent range. */
3241 	if (delalloc_len == 0) {
3242 		*delalloc_start_ret = oe_start;
3243 		*delalloc_end_ret = oe_end;
3244 		return true;
3245 	}
3246 
3247 	/*
3248 	 * We have both unflushed delalloc (io_tree) and an ordered extent.
3249 	 * If the ranges are adjacent, return a combined range; otherwise
3250 	 * return the leftmost range.
3251 	 */
3252 	if (oe_start < *delalloc_start_ret) {
3253 		if (oe_end < *delalloc_start_ret)
3254 			*delalloc_end_ret = oe_end;
3255 		*delalloc_start_ret = oe_start;
3256 	} else if (*delalloc_end_ret + 1 == oe_start) {
3257 		*delalloc_end_ret = oe_end;
3258 	}
3259 
3260 	return true;
3261 }
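
/*
 * Editorial note (not part of the original source): an example of the merge
 * logic at the end of find_delalloc_subrange().  Suppose the io tree
 * reports delalloc for [64K, 128K) and an ordered extent covers [32K, 64K):
 *
 *	oe_start = 32K, oe_end = 64K - 1 < *delalloc_start_ret = 64K
 *		=> the leftmost range wins: report [32K, 64K - 1]
 *
 * If instead the ordered extent started exactly at *delalloc_end_ret + 1,
 * the two ranges would be merged and reported as one.
 */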
3262 
3263 /*
3264  * Check if there's delalloc in a given range.
3265  *
3266  * @inode:               The inode.
3267  * @start:               The start offset of the range. It does not need to be
3268  *                       sector size aligned.
3269  * @end:                 The end offset (inclusive value) of the search range.
3270  *                       It does not need to be sector size aligned.
3271  * @cached_state:        Extent state record used for speeding up delalloc
3272  *                       searches in the inode's io_tree. Can be NULL.
3273  * @delalloc_start_ret:  Output argument, set to the start offset of the
3274  *                       subrange found with delalloc (may not be sector size
3275  *                       aligned).
3276  * @delalloc_end_ret:    Output argument, set to the end offset (inclusive value)
3277  *                       of the subrange found with delalloc.
3278  *
3279  * Returns true if a subrange with delalloc is found within the given range, and
3280  * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3281  * end offsets of the subrange.
3282  */
3283 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3284 				  struct extent_state **cached_state,
3285 				  u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3286 {
3287 	u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3288 	u64 prev_delalloc_end = 0;
3289 	bool search_io_tree = true;
3290 	bool ret = false;
3291 
3292 	while (cur_offset <= end) {
3293 		u64 delalloc_start;
3294 		u64 delalloc_end;
3295 		bool delalloc;
3296 
3297 		delalloc = find_delalloc_subrange(inode, cur_offset, end,
3298 						  cached_state, &search_io_tree,
3299 						  &delalloc_start,
3300 						  &delalloc_end);
3301 		if (!delalloc)
3302 			break;
3303 
3304 		if (prev_delalloc_end == 0) {
3305 			/* First subrange found. */
3306 			*delalloc_start_ret = max(delalloc_start, start);
3307 			*delalloc_end_ret = delalloc_end;
3308 			ret = true;
3309 		} else if (delalloc_start == prev_delalloc_end + 1) {
3310 			/* Subrange adjacent to the previous one, merge them. */
3311 			*delalloc_end_ret = delalloc_end;
3312 		} else {
3313 			/* Subrange not adjacent to the previous one, exit. */
3314 			break;
3315 		}
3316 
3317 		prev_delalloc_end = delalloc_end;
3318 		cur_offset = delalloc_end + 1;
3319 		cond_resched();
3320 	}
3321 
3322 	return ret;
3323 }
3324 
3325 /*
3326  * Check if there's a hole or delalloc range in a range representing a hole (or
3327  * prealloc extent) found in the inode's subvolume btree.
3328  *
3329  * @inode:      The inode.
3330  * @whence:     Seek mode (SEEK_DATA or SEEK_HOLE).
3331  * @start:      Start offset of the hole region. It does not need to be sector
3332  *              size aligned.
3333  * @end:        End offset (inclusive value) of the hole region. It does not
3334  *              need to be sector size aligned.
3335  * @start_ret:  Return parameter, used to set the start of the subrange in the
3336  *              hole that matches the search criteria (seek mode), if such a
3337  *              subrange is found (return value of the function is true).
3338  *              The value returned here may not be sector size aligned.
3339  *
3340  * Returns true if a subrange matching the given seek mode is found, and if one
3341  * is found, it updates @start_ret with the start of the subrange.
3342  */
3343 static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3344 					struct extent_state **cached_state,
3345 					u64 start, u64 end, u64 *start_ret)
3346 {
3347 	u64 delalloc_start;
3348 	u64 delalloc_end;
3349 	bool delalloc;
3350 
3351 	delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
3352 						&delalloc_start, &delalloc_end);
3353 	if (delalloc && whence == SEEK_DATA) {
3354 		*start_ret = delalloc_start;
3355 		return true;
3356 	}
3357 
3358 	if (delalloc && whence == SEEK_HOLE) {
3359 		/*
3360 		 * We found delalloc, but it may start after our start offset,
3361 		 * leaving a hole between our start offset and the delalloc start.
3362 		 */
3363 		if (start < delalloc_start) {
3364 			*start_ret = start;
3365 			return true;
3366 		}
3367 		/*
3368 		 * Delalloc range starts at our start offset.
3369 		 * If the delalloc range's length is smaller than our range,
3370 		 * then it means we have a hole that starts where the delalloc
3371 		 * subrange ends.
3372 		 */
3373 		if (delalloc_end < end) {
3374 			*start_ret = delalloc_end + 1;
3375 			return true;
3376 		}
3377 
3378 		/* There's delalloc for the whole range. */
3379 		return false;
3380 	}
3381 
3382 	if (!delalloc && whence == SEEK_HOLE) {
3383 		*start_ret = start;
3384 		return true;
3385 	}
3386 
3387 	/*
3388 	 * No delalloc in the range and we are seeking for data. The caller has
3389 	 * to iterate to the next extent item in the subvolume btree.
3390 	 */
3391 	return false;
3392 }
3393 
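/*
 * Editorial summary of the find_desired_extent_in_hole() outcomes:
 *
 *   delalloc found   whence      result
 *   --------------   ---------   ------------------------------------------
 *   yes              SEEK_DATA   true, @start_ret = delalloc start
 *   yes              SEEK_HOLE   true with @start_ret = @start if a hole
 *                                precedes the delalloc, true with
 *                                @start_ret = delalloc end + 1 if a hole
 *                                follows it, false if delalloc covers the
 *                                whole range
 *   no               SEEK_HOLE   true, @start_ret = @start
 *   no               SEEK_DATA   false (caller moves to the next extent item)
 */
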
3394 static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
3395 {
3396 	struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
3397 	struct btrfs_file_private *private;
3398 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3399 	struct extent_state *cached_state = NULL;
3400 	struct extent_state **delalloc_cached_state;
3401 	const loff_t i_size = i_size_read(&inode->vfs_inode);
3402 	const u64 ino = btrfs_ino(inode);
3403 	struct btrfs_root *root = inode->root;
3404 	struct btrfs_path *path;
3405 	struct btrfs_key key;
3406 	u64 last_extent_end;
3407 	u64 lockstart;
3408 	u64 lockend;
3409 	u64 start;
3410 	int ret;
3411 	bool found = false;
3412 
3413 	if (i_size == 0 || offset >= i_size)
3414 		return -ENXIO;
3415 
3416 	/*
3417 	 * Quick path. If the inode has no prealloc extents and its number of
3418 	 * bytes used matches its i_size, then it cannot have holes.
3419 	 */
3420 	if (whence == SEEK_HOLE &&
3421 	    !(inode->flags & BTRFS_INODE_PREALLOC) &&
3422 	    inode_get_bytes(&inode->vfs_inode) == i_size)
3423 		return i_size;
3424 
3425 	spin_lock(&inode->lock);
3426 	private = file->private_data;
3427 	spin_unlock(&inode->lock);
3428 
3429 	if (private && private->owner_task != current) {
3430 		/*
3431 		 * Not allocated by us, so don't use it: its cached state is
3432 		 * owned by the task that allocated it, and we want neither to
3433 		 * mess with that state nor to get incorrect results from a
3434 		 * state that is invalid for the current task.
3435 		 */
3436 		private = NULL;
3437 	} else if (!private) {
3438 		private = kzalloc(sizeof(*private), GFP_KERNEL);
3439 		/*
3440 		 * No worries if memory allocation failed.
3441 		 * The private structure is used only for speeding up multiple
3442 		 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
3443 		 * so everything will still be correct.
3444 		 */
3445 		if (private) {
3446 			bool free = false;
3447 
3448 			private->owner_task = current;
3449 
3450 			spin_lock(&inode->lock);
3451 			if (file->private_data)
3452 				free = true;
3453 			else
3454 				file->private_data = private;
3455 			spin_unlock(&inode->lock);
3456 
3457 			if (free) {
3458 				kfree(private);
3459 				private = NULL;
3460 			}
3461 		}
3462 	}
3463 
3464 	if (private)
3465 		delalloc_cached_state = &private->llseek_cached_state;
3466 	else
3467 		delalloc_cached_state = NULL;
3468 
3469 	/*
3470 	 * offset can be negative, in which case we start finding DATA/HOLE from
3471 	 * the very start of the file.
3472 	 */
3473 	start = max_t(loff_t, 0, offset);
3474 
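	/*
	 * Lock the sector aligned range covering [start, i_size). Make sure
	 * the range spans at least one sector and turn lockend into an
	 * inclusive offset.
	 */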
3475 	lockstart = round_down(start, fs_info->sectorsize);
3476 	lockend = round_up(i_size, fs_info->sectorsize);
3477 	if (lockend <= lockstart)
3478 		lockend = lockstart + fs_info->sectorsize;
3479 	lockend--;
3480 
3481 	path = btrfs_alloc_path();
3482 	if (!path)
3483 		return -ENOMEM;
3484 	path->reada = READA_FORWARD;
3485 
3486 	key.objectid = ino;
3487 	key.type = BTRFS_EXTENT_DATA_KEY;
3488 	key.offset = start;
3489 
3490 	last_extent_end = lockstart;
3491 
3492 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3493 
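	/*
	 * On a return value > 0 the key was not found and the path points to
	 * the slot where it would be inserted. The previous item, if it is an
	 * extent item of this inode, may still cover our start offset, so
	 * step back one slot and examine it first.
	 */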
3494 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3495 	if (ret < 0) {
3496 		goto out;
3497 	} else if (ret > 0 && path->slots[0] > 0) {
3498 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3499 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3500 			path->slots[0]--;
3501 	}
3502 
3503 	while (start < i_size) {
3504 		struct extent_buffer *leaf = path->nodes[0];
3505 		struct btrfs_file_extent_item *extent;
3506 		u64 extent_end;
3507 		u8 type;
3508 
3509 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3510 			ret = btrfs_next_leaf(root, path);
3511 			if (ret < 0)
3512 				goto out;
3513 			else if (ret > 0)
3514 				break;
3515 
3516 			leaf = path->nodes[0];
3517 		}
3518 
3519 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3520 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3521 			break;
3522 
3523 		extent_end = btrfs_file_extent_end(path);
3524 
3525 		/*
3526 		 * In the first iteration we may have a slot that points to an
3527 		 * extent that ends before our start offset, so skip it.
3528 		 */
3529 		if (extent_end <= start) {
3530 			path->slots[0]++;
3531 			continue;
3532 		}
3533 
3534 		/* We have an implicit hole, NO_HOLES feature is likely set. */
3535 		if (last_extent_end < key.offset) {
3536 			u64 search_start = last_extent_end;
3537 			u64 found_start;
3538 
3539 			/*
3540 			 * First iteration, @start matches @offset and it's
3541 			 * within the hole.
3542 			 */
3543 			if (start == offset)
3544 				search_start = offset;
3545 
3546 			found = find_desired_extent_in_hole(inode, whence,
3547 							    delalloc_cached_state,
3548 							    search_start,
3549 							    key.offset - 1,
3550 							    &found_start);
3551 			if (found) {
3552 				start = found_start;
3553 				break;
3554 			}
3555 			/*
3556 			 * Didn't find data or a hole (due to delalloc) in the
3557 			 * implicit hole range, so we need to analyze the extent.
3558 			 */
3559 		}
3560 
3561 		extent = btrfs_item_ptr(leaf, path->slots[0],
3562 					struct btrfs_file_extent_item);
3563 		type = btrfs_file_extent_type(leaf, extent);
3564 
3565 		/*
3566 		 * Can't access the extent's disk_bytenr field if this is an
3567 		 * inline extent, since at that offset, it's where the extent
3568 		 * data starts.
3569 		 */
3570 		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
3571 		    (type == BTRFS_FILE_EXTENT_REG &&
3572 		     btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
3573 			/*
3574 			 * Explicit hole or prealloc extent, search for delalloc.
3575 			 * A prealloc extent is treated like a hole.
3576 			 */
3577 			u64 search_start = key.offset;
3578 			u64 found_start;
3579 
3580 			/*
3581 			 * First iteration, @start matches @offset and it's
3582 			 * within the hole.
3583 			 */
3584 			if (start == offset)
3585 				search_start = offset;
3586 
3587 			found = find_desired_extent_in_hole(inode, whence,
3588 							    delalloc_cached_state,
3589 							    search_start,
3590 							    extent_end - 1,
3591 							    &found_start);
3592 			if (found) {
3593 				start = found_start;
3594 				break;
3595 			}
3596 			/*
3597 			 * Didn't find data or a hole (due to delalloc) in the
3598 			 * explicit hole or prealloc range, so we need to analyze
3599 			 * the next extent item.
3600 			 */
3601 		} else {
3602 			/*
3603 			 * Found a regular or inline extent.
3604 			 * If we are seeking for data, adjust the start offset
3605 			 * and stop, we're done.
3606 			 */
3607 			if (whence == SEEK_DATA) {
3608 				start = max_t(u64, key.offset, offset);
3609 				found = true;
3610 				break;
3611 			}
3612 			/*
3613 			 * Else, we are seeking for a hole, check the next file
3614 			 * extent item.
3615 			 */
3616 		}
3617 
3618 		start = extent_end;
3619 		last_extent_end = extent_end;
3620 		path->slots[0]++;
3621 		if (fatal_signal_pending(current)) {
3622 			ret = -EINTR;
3623 			goto out;
3624 		}
3625 		cond_resched();
3626 	}
3627 
3628 	/* We have an implicit hole from the last extent found up to i_size. */
3629 	if (!found && start < i_size) {
3630 		found = find_desired_extent_in_hole(inode, whence,
3631 						    delalloc_cached_state, start,
3632 						    i_size - 1, &start);
3633 		if (!found)
3634 			start = i_size;
3635 	}
3636 
3637 out:
3638 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3639 	btrfs_free_path(path);
3640 
3641 	if (ret < 0)
3642 		return ret;
3643 
3644 	if (whence == SEEK_DATA && start >= i_size)
3645 		return -ENXIO;
3646 
3647 	return min_t(loff_t, start, i_size);
3648 }
3649 
3650 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3651 {
3652 	struct inode *inode = file->f_mapping->host;
3653 
3654 	switch (whence) {
3655 	default:
3656 		return generic_file_llseek(file, offset, whence);
3657 	case SEEK_DATA:
3658 	case SEEK_HOLE:
3659 		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3660 		offset = find_desired_extent(file, offset, whence);
3661 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3662 		break;
3663 	}
3664 
3665 	if (offset < 0)
3666 		return offset;
3667 
3668 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3669 }
3670 
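/*
 * Editorial example: the userspace-visible semantics that result (plain C,
 * not kernel code):
 *
 *	off_t data = lseek(fd, pos, SEEK_DATA);
 *	if (data == (off_t)-1 && errno == ENXIO) {
 *		// No data at or beyond pos: pos is at or past EOF, or only
 *		// a hole remains until EOF.
 *	}
 *
 *	off_t hole = lseek(fd, pos, SEEK_HOLE);
 *	// Succeeds for any pos < EOF: when no hole exists before EOF,
 *	// find_desired_extent() reports the implicit hole at i_size.
 */
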
3671 static int btrfs_file_open(struct inode *inode, struct file *filp)
3672 {
3673 	int ret;
3674 
3675 	filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
3676 
3677 	ret = fsverity_file_open(inode, filp);
3678 	if (ret)
3679 		return ret;
3680 	return generic_file_open(inode, filp);
3681 }
3682 
3683 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3684 {
3685 	ssize_t ret = 0;
3686 
3687 	if (iocb->ki_flags & IOCB_DIRECT) {
3688 		ret = btrfs_direct_read(iocb, to);
3689 		if (ret < 0 || !iov_iter_count(to) ||
3690 		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3691 			return ret;
3692 	}
3693 
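	/*
	 * No direct IO was requested, or the direct read stopped before
	 * covering everything: read the remainder through the page cache,
	 * passing along the byte count the direct read already copied.
	 */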
3694 	return filemap_read(iocb, to, ret);
3695 }
3696 
3697 const struct file_operations btrfs_file_operations = {
3698 	.llseek		= btrfs_file_llseek,
3699 	.read_iter      = btrfs_file_read_iter,
3700 	.splice_read	= filemap_splice_read,
3701 	.write_iter	= btrfs_file_write_iter,
3702 	.splice_write	= iter_file_splice_write,
3703 	.mmap		= btrfs_file_mmap,
3704 	.open		= btrfs_file_open,
3705 	.release	= btrfs_release_file,
3706 	.get_unmapped_area = thp_get_unmapped_area,
3707 	.fsync		= btrfs_sync_file,
3708 	.fallocate	= btrfs_fallocate,
3709 	.unlocked_ioctl	= btrfs_ioctl,
3710 #ifdef CONFIG_COMPAT
3711 	.compat_ioctl	= btrfs_compat_ioctl,
3712 #endif
3713 	.remap_file_range = btrfs_remap_file_range,
3714 	.uring_cmd	= btrfs_uring_cmd,
3715 	.fop_flags	= FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
3716 };
3717 
3718 int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end)
3719 {
3720 	struct address_space *mapping = inode->vfs_inode.i_mapping;
3721 	int ret;
3722 
3723 	/*
3724 	 * So with compression we will find and lock a dirty page, clear the
3725 	 * first one's dirty bit, set up an async extent, and immediately return
3726 	 * with the entire range locked but with nobody actually marked for
3727 	 * writeback.  So we can't just call filemap_write_and_wait_range() and
3728 	 * expect it to work, since it will just kick off a thread to do the
3729 	 * actual work.  So we need to call filemap_fdatawrite_range() _again_,
3730 	 * since it will wait on the page lock, which won't be unlocked until
3731 	 * after the pages have been marked as writeback, and so we're good to
3732 	 * go from there.  We have to do this, otherwise we'll miss the ordered
3733 	 * extents and that results in badness.  Please Josef, do not think you
3734 	 * know better and pull this out at some point in the future, it is
3735 	 * right and you are wrong.
3736 	 */
3737 	ret = filemap_fdatawrite_range(mapping, start, end);
3738 	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags))
3739 		ret = filemap_fdatawrite_range(mapping, start, end);
3740 
3741 	return ret;
3742 }
3743
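
/*
 * Illustrative sketch (hypothetical helper, not part of btrfs): the usual
 * caller pattern around btrfs_fdatawrite_range() is to start writeback and
 * then wait for it, relying on the double fdatawrite above so that async
 * (compressed) extents are really submitted before the wait. In btrfs proper
 * the waiting typically goes through the ordered extent machinery instead.
 */
static inline int sketch_write_and_wait(struct btrfs_inode *inode,
					loff_t start, loff_t end)
{
	int ret = btrfs_fdatawrite_range(inode, start, end);

	if (ret)
		return ret;
	return filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, end);
}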