xref: /linux/fs/btrfs/file.c (revision 7fe03f8ff55d33fe6398637f78a8620dd2a78b38)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/time.h>
9 #include <linux/init.h>
10 #include <linux/string.h>
11 #include <linux/backing-dev.h>
12 #include <linux/falloc.h>
13 #include <linux/writeback.h>
14 #include <linux/compat.h>
15 #include <linux/slab.h>
16 #include <linux/btrfs.h>
17 #include <linux/uio.h>
18 #include <linux/iversion.h>
19 #include <linux/fsverity.h>
20 #include "ctree.h"
21 #include "direct-io.h"
22 #include "disk-io.h"
23 #include "transaction.h"
24 #include "btrfs_inode.h"
25 #include "tree-log.h"
26 #include "locking.h"
27 #include "qgroup.h"
28 #include "compression.h"
29 #include "delalloc-space.h"
30 #include "reflink.h"
31 #include "subpage.h"
32 #include "fs.h"
33 #include "accessors.h"
34 #include "extent-tree.h"
35 #include "file-item.h"
36 #include "ioctl.h"
37 #include "file.h"
38 #include "super.h"
39 #include "print-tree.h"
40 
41 /*
42  * Unlock folio after btrfs_file_write() is done with it.
43  */
44 static void btrfs_drop_folio(struct btrfs_fs_info *fs_info, struct folio *folio,
45 			     u64 pos, u64 copied)
46 {
47 	u64 block_start = round_down(pos, fs_info->sectorsize);
48 	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
49 
50 	ASSERT(block_len <= U32_MAX);
51 	/*
52 	 * Folio checked is some magic around finding folios that have been
53 	 * modified without going through btrfs_dirty_folio().  Clear it here.
54 	 * There should be no need to mark the pages accessed as
55 	 * prepare_one_folio() should have marked them accessed in
56 	 * prepare_one_folio() via find_or_create_page()
57 	 */
58 	btrfs_folio_clamp_clear_checked(fs_info, folio, block_start, block_len);
59 	folio_unlock(folio);
60 	folio_put(folio);
61 }
62 
63 /*
64  * After copy_folio_from_iter_atomic(), update the following things for delalloc:
65  * - Mark newly dirtied folio as DELALLOC in the io tree.
66  *   Used to advise which range is to be written back.
67  * - Mark modified folio as Uptodate/Dirty and not needing COW fixup
68  * - Update inode size for past EOF write
69  */
70 int btrfs_dirty_folio(struct btrfs_inode *inode, struct folio *folio, loff_t pos,
71 		      size_t write_bytes, struct extent_state **cached, bool noreserve)
72 {
73 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
74 	int ret = 0;
75 	u64 num_bytes;
76 	u64 start_pos;
77 	u64 end_of_last_block;
78 	u64 end_pos = pos + write_bytes;
79 	loff_t isize = i_size_read(&inode->vfs_inode);
80 	unsigned int extra_bits = 0;
81 
82 	if (write_bytes == 0)
83 		return 0;
84 
85 	if (noreserve)
86 		extra_bits |= EXTENT_NORESERVE;
87 
88 	start_pos = round_down(pos, fs_info->sectorsize);
89 	num_bytes = round_up(write_bytes + pos - start_pos,
90 			     fs_info->sectorsize);
91 	ASSERT(num_bytes <= U32_MAX);
92 	ASSERT(folio_pos(folio) <= pos &&
93 	       folio_pos(folio) + folio_size(folio) >= pos + write_bytes);
94 
95 	end_of_last_block = start_pos + num_bytes - 1;
96 
97 	/*
98 	 * The pages may have already been dirty, clear out old accounting so
99 	 * we can set things up properly
100 	 */
101 	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
102 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
103 			 cached);
104 
105 	ret = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
106 					extra_bits, cached);
107 	if (ret)
108 		return ret;
109 
110 	btrfs_folio_clamp_set_uptodate(fs_info, folio, start_pos, num_bytes);
111 	btrfs_folio_clamp_clear_checked(fs_info, folio, start_pos, num_bytes);
112 	btrfs_folio_clamp_set_dirty(fs_info, folio, start_pos, num_bytes);
113 
114 	/*
115 	 * we've only changed i_size in ram, and we haven't updated
116 	 * the disk i_size.  There is no need to log the inode
117 	 * at this time.
118 	 */
119 	if (end_pos > isize)
120 		i_size_write(&inode->vfs_inode, end_pos);
121 	return 0;
122 }
123 
124 /*
125  * this is very complex, but the basic idea is to drop all extents
126  * in the range start - end.  hint_block is filled in with a block number
127  * that would be a good hint to the block allocator for this file.
128  *
129  * If an extent intersects the range but is not entirely inside the range
130  * it is either truncated or split.  Anything entirely inside the range
131  * is deleted from the tree.
132  *
133  * Note: the VFS' inode number of bytes is not updated, it's up to the caller
134  * to deal with that. We set the field 'bytes_found' of the arguments structure
135  * with the number of allocated bytes found in the target range, so that the
136  * caller can update the inode's number of bytes in an atomic way when
137  * replacing extents in a range to avoid races with stat(2).
138  */
139 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
140 		       struct btrfs_root *root, struct btrfs_inode *inode,
141 		       struct btrfs_drop_extents_args *args)
142 {
143 	struct btrfs_fs_info *fs_info = root->fs_info;
144 	struct extent_buffer *leaf;
145 	struct btrfs_file_extent_item *fi;
146 	struct btrfs_key key;
147 	struct btrfs_key new_key;
148 	u64 ino = btrfs_ino(inode);
149 	u64 search_start = args->start;
150 	u64 disk_bytenr = 0;
151 	u64 num_bytes = 0;
152 	u64 extent_offset = 0;
153 	u64 extent_end = 0;
154 	u64 last_end = args->start;
155 	int del_nr = 0;
156 	int del_slot = 0;
157 	int extent_type;
158 	int recow;
159 	int ret;
160 	int modify_tree = -1;
161 	int update_refs;
162 	int found = 0;
163 	struct btrfs_path *path = args->path;
164 
165 	args->bytes_found = 0;
166 	args->extent_inserted = false;
167 
168 	/* Must always have a path if ->replace_extent is true */
169 	ASSERT(!(args->replace_extent && !args->path));
170 
171 	if (!path) {
172 		path = btrfs_alloc_path();
173 		if (!path) {
174 			ret = -ENOMEM;
175 			goto out;
176 		}
177 	}
178 
179 	if (args->drop_cache)
180 		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
181 
182 	if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent)
183 		modify_tree = 0;
184 
185 	update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
186 	while (1) {
187 		recow = 0;
188 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
189 					       search_start, modify_tree);
190 		if (ret < 0)
191 			break;
192 		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
193 			leaf = path->nodes[0];
194 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
195 			if (key.objectid == ino &&
196 			    key.type == BTRFS_EXTENT_DATA_KEY)
197 				path->slots[0]--;
198 		}
199 		ret = 0;
200 next_slot:
201 		leaf = path->nodes[0];
202 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
203 			if (WARN_ON(del_nr > 0)) {
204 				btrfs_print_leaf(leaf);
205 				ret = -EINVAL;
206 				break;
207 			}
208 			ret = btrfs_next_leaf(root, path);
209 			if (ret < 0)
210 				break;
211 			if (ret > 0) {
212 				ret = 0;
213 				break;
214 			}
215 			leaf = path->nodes[0];
216 			recow = 1;
217 		}
218 
219 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
220 
221 		if (key.objectid > ino)
222 			break;
223 		if (WARN_ON_ONCE(key.objectid < ino) ||
224 		    key.type < BTRFS_EXTENT_DATA_KEY) {
225 			ASSERT(del_nr == 0);
226 			path->slots[0]++;
227 			goto next_slot;
228 		}
229 		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
230 			break;
231 
232 		fi = btrfs_item_ptr(leaf, path->slots[0],
233 				    struct btrfs_file_extent_item);
234 		extent_type = btrfs_file_extent_type(leaf, fi);
235 
236 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
237 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
238 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
239 			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
240 			extent_offset = btrfs_file_extent_offset(leaf, fi);
241 			extent_end = key.offset +
242 				btrfs_file_extent_num_bytes(leaf, fi);
243 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
244 			extent_end = key.offset +
245 				btrfs_file_extent_ram_bytes(leaf, fi);
246 		} else {
247 			/* can't happen */
248 			BUG();
249 		}
250 
251 		/*
252 		 * Don't skip extent items representing 0 byte lengths. They
253 		 * used to be created (bug) if while punching holes we hit
254 		 * -ENOSPC condition. So if we find one here, just ensure we
255 		 * delete it, otherwise we would insert a new file extent item
256 		 * with the same key (offset) as that 0 bytes length file
257 		 * extent item in the call to setup_items_for_insert() later
258 		 * in this function.
259 		 */
260 		if (extent_end == key.offset && extent_end >= search_start) {
261 			last_end = extent_end;
262 			goto delete_extent_item;
263 		}
264 
265 		if (extent_end <= search_start) {
266 			path->slots[0]++;
267 			goto next_slot;
268 		}
269 
270 		found = 1;
271 		search_start = max(key.offset, args->start);
272 		if (recow || !modify_tree) {
273 			modify_tree = -1;
274 			btrfs_release_path(path);
275 			continue;
276 		}
277 
278 		/*
279 		 *     | - range to drop - |
280 		 *  | -------- extent -------- |
281 		 */
282 		if (args->start > key.offset && args->end < extent_end) {
283 			if (WARN_ON(del_nr > 0)) {
284 				btrfs_print_leaf(leaf);
285 				ret = -EINVAL;
286 				break;
287 			}
288 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
289 				ret = -EOPNOTSUPP;
290 				break;
291 			}
292 
293 			memcpy(&new_key, &key, sizeof(new_key));
294 			new_key.offset = args->start;
295 			ret = btrfs_duplicate_item(trans, root, path,
296 						   &new_key);
297 			if (ret == -EAGAIN) {
298 				btrfs_release_path(path);
299 				continue;
300 			}
301 			if (ret < 0)
302 				break;
303 
304 			leaf = path->nodes[0];
305 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
306 					    struct btrfs_file_extent_item);
307 			btrfs_set_file_extent_num_bytes(leaf, fi,
308 							args->start - key.offset);
309 
310 			fi = btrfs_item_ptr(leaf, path->slots[0],
311 					    struct btrfs_file_extent_item);
312 
313 			extent_offset += args->start - key.offset;
314 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
315 			btrfs_set_file_extent_num_bytes(leaf, fi,
316 							extent_end - args->start);
317 
318 			if (update_refs && disk_bytenr > 0) {
319 				struct btrfs_ref ref = {
320 					.action = BTRFS_ADD_DELAYED_REF,
321 					.bytenr = disk_bytenr,
322 					.num_bytes = num_bytes,
323 					.parent = 0,
324 					.owning_root = btrfs_root_id(root),
325 					.ref_root = btrfs_root_id(root),
326 				};
327 				btrfs_init_data_ref(&ref, new_key.objectid,
328 						    args->start - extent_offset,
329 						    0, false);
330 				ret = btrfs_inc_extent_ref(trans, &ref);
331 				if (ret) {
332 					btrfs_abort_transaction(trans, ret);
333 					break;
334 				}
335 			}
336 			key.offset = args->start;
337 		}
338 		/*
339 		 * From here on out we will have actually dropped something, so
340 		 * last_end can be updated.
341 		 */
342 		last_end = extent_end;
343 
344 		/*
345 		 *  | ---- range to drop ----- |
346 		 *      | -------- extent -------- |
347 		 */
348 		if (args->start <= key.offset && args->end < extent_end) {
349 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
350 				ret = -EOPNOTSUPP;
351 				break;
352 			}
353 
354 			memcpy(&new_key, &key, sizeof(new_key));
355 			new_key.offset = args->end;
356 			btrfs_set_item_key_safe(trans, path, &new_key);
357 
358 			extent_offset += args->end - key.offset;
359 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
360 			btrfs_set_file_extent_num_bytes(leaf, fi,
361 							extent_end - args->end);
362 			if (update_refs && disk_bytenr > 0)
363 				args->bytes_found += args->end - key.offset;
364 			break;
365 		}
366 
367 		search_start = extent_end;
368 		/*
369 		 *       | ---- range to drop ----- |
370 		 *  | -------- extent -------- |
371 		 */
372 		if (args->start > key.offset && args->end >= extent_end) {
373 			if (WARN_ON(del_nr > 0)) {
374 				btrfs_print_leaf(leaf);
375 				ret = -EINVAL;
376 				break;
377 			}
378 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
379 				ret = -EOPNOTSUPP;
380 				break;
381 			}
382 
383 			btrfs_set_file_extent_num_bytes(leaf, fi,
384 							args->start - key.offset);
385 			if (update_refs && disk_bytenr > 0)
386 				args->bytes_found += extent_end - args->start;
387 			if (args->end == extent_end)
388 				break;
389 
390 			path->slots[0]++;
391 			goto next_slot;
392 		}
393 
394 		/*
395 		 *  | ---- range to drop ----- |
396 		 *    | ------ extent ------ |
397 		 */
398 		if (args->start <= key.offset && args->end >= extent_end) {
399 delete_extent_item:
400 			if (del_nr == 0) {
401 				del_slot = path->slots[0];
402 				del_nr = 1;
403 			} else {
404 				if (WARN_ON(del_slot + del_nr != path->slots[0])) {
405 					btrfs_print_leaf(leaf);
406 					ret = -EINVAL;
407 					break;
408 				}
409 				del_nr++;
410 			}
411 
412 			if (update_refs &&
413 			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
414 				args->bytes_found += extent_end - key.offset;
415 				extent_end = ALIGN(extent_end,
416 						   fs_info->sectorsize);
417 			} else if (update_refs && disk_bytenr > 0) {
418 				struct btrfs_ref ref = {
419 					.action = BTRFS_DROP_DELAYED_REF,
420 					.bytenr = disk_bytenr,
421 					.num_bytes = num_bytes,
422 					.parent = 0,
423 					.owning_root = btrfs_root_id(root),
424 					.ref_root = btrfs_root_id(root),
425 				};
426 				btrfs_init_data_ref(&ref, key.objectid,
427 						    key.offset - extent_offset,
428 						    0, false);
429 				ret = btrfs_free_extent(trans, &ref);
430 				if (ret) {
431 					btrfs_abort_transaction(trans, ret);
432 					break;
433 				}
434 				args->bytes_found += extent_end - key.offset;
435 			}
436 
437 			if (args->end == extent_end)
438 				break;
439 
440 			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
441 				path->slots[0]++;
442 				goto next_slot;
443 			}
444 
445 			ret = btrfs_del_items(trans, root, path, del_slot,
446 					      del_nr);
447 			if (ret) {
448 				btrfs_abort_transaction(trans, ret);
449 				break;
450 			}
451 
452 			del_nr = 0;
453 			del_slot = 0;
454 
455 			btrfs_release_path(path);
456 			continue;
457 		}
458 
459 		BUG();
460 	}
461 
462 	if (!ret && del_nr > 0) {
463 		/*
464 		 * Set path->slots[0] to first slot, so that after the delete
465 		 * if items are move off from our leaf to its immediate left or
466 		 * right neighbor leafs, we end up with a correct and adjusted
467 		 * path->slots[0] for our insertion (if args->replace_extent).
468 		 */
469 		path->slots[0] = del_slot;
470 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
471 		if (ret)
472 			btrfs_abort_transaction(trans, ret);
473 	}
474 
475 	leaf = path->nodes[0];
476 	/*
477 	 * If btrfs_del_items() was called, it might have deleted a leaf, in
478 	 * which case it unlocked our path, so check path->locks[0] matches a
479 	 * write lock.
480 	 */
481 	if (!ret && args->replace_extent &&
482 	    path->locks[0] == BTRFS_WRITE_LOCK &&
483 	    btrfs_leaf_free_space(leaf) >=
484 	    sizeof(struct btrfs_item) + args->extent_item_size) {
485 
486 		key.objectid = ino;
487 		key.type = BTRFS_EXTENT_DATA_KEY;
488 		key.offset = args->start;
489 		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
490 			struct btrfs_key slot_key;
491 
492 			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
493 			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
494 				path->slots[0]++;
495 		}
496 		btrfs_setup_item_for_insert(trans, root, path, &key,
497 					    args->extent_item_size);
498 		args->extent_inserted = true;
499 	}
500 
501 	if (!args->path)
502 		btrfs_free_path(path);
503 	else if (!args->extent_inserted)
504 		btrfs_release_path(path);
505 out:
506 	args->drop_end = found ? min(args->end, last_end) : args->end;
507 
508 	return ret;
509 }
510 
511 static int extent_mergeable(struct extent_buffer *leaf, int slot,
512 			    u64 objectid, u64 bytenr, u64 orig_offset,
513 			    u64 *start, u64 *end)
514 {
515 	struct btrfs_file_extent_item *fi;
516 	struct btrfs_key key;
517 	u64 extent_end;
518 
519 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
520 		return 0;
521 
522 	btrfs_item_key_to_cpu(leaf, &key, slot);
523 	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
524 		return 0;
525 
526 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
527 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
528 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
529 	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
530 	    btrfs_file_extent_compression(leaf, fi) ||
531 	    btrfs_file_extent_encryption(leaf, fi) ||
532 	    btrfs_file_extent_other_encoding(leaf, fi))
533 		return 0;
534 
535 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
536 	if ((*start && *start != key.offset) || (*end && *end != extent_end))
537 		return 0;
538 
539 	*start = key.offset;
540 	*end = extent_end;
541 	return 1;
542 }
543 
544 /*
545  * Mark extent in the range start - end as written.
546  *
547  * This changes extent type from 'pre-allocated' to 'regular'. If only
548  * part of extent is marked as written, the extent will be split into
549  * two or three.
550  */
551 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
552 			      struct btrfs_inode *inode, u64 start, u64 end)
553 {
554 	struct btrfs_root *root = inode->root;
555 	struct extent_buffer *leaf;
556 	struct btrfs_path *path;
557 	struct btrfs_file_extent_item *fi;
558 	struct btrfs_ref ref = { 0 };
559 	struct btrfs_key key;
560 	struct btrfs_key new_key;
561 	u64 bytenr;
562 	u64 num_bytes;
563 	u64 extent_end;
564 	u64 orig_offset;
565 	u64 other_start;
566 	u64 other_end;
567 	u64 split;
568 	int del_nr = 0;
569 	int del_slot = 0;
570 	int recow;
571 	int ret = 0;
572 	u64 ino = btrfs_ino(inode);
573 
574 	path = btrfs_alloc_path();
575 	if (!path)
576 		return -ENOMEM;
577 again:
578 	recow = 0;
579 	split = start;
580 	key.objectid = ino;
581 	key.type = BTRFS_EXTENT_DATA_KEY;
582 	key.offset = split;
583 
584 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
585 	if (ret < 0)
586 		goto out;
587 	if (ret > 0 && path->slots[0] > 0)
588 		path->slots[0]--;
589 
590 	leaf = path->nodes[0];
591 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
592 	if (key.objectid != ino ||
593 	    key.type != BTRFS_EXTENT_DATA_KEY) {
594 		ret = -EINVAL;
595 		btrfs_abort_transaction(trans, ret);
596 		goto out;
597 	}
598 	fi = btrfs_item_ptr(leaf, path->slots[0],
599 			    struct btrfs_file_extent_item);
600 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
601 		ret = -EINVAL;
602 		btrfs_abort_transaction(trans, ret);
603 		goto out;
604 	}
605 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
606 	if (key.offset > start || extent_end < end) {
607 		ret = -EINVAL;
608 		btrfs_abort_transaction(trans, ret);
609 		goto out;
610 	}
611 
612 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
613 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
614 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
615 	memcpy(&new_key, &key, sizeof(new_key));
616 
617 	if (start == key.offset && end < extent_end) {
618 		other_start = 0;
619 		other_end = start;
620 		if (extent_mergeable(leaf, path->slots[0] - 1,
621 				     ino, bytenr, orig_offset,
622 				     &other_start, &other_end)) {
623 			new_key.offset = end;
624 			btrfs_set_item_key_safe(trans, path, &new_key);
625 			fi = btrfs_item_ptr(leaf, path->slots[0],
626 					    struct btrfs_file_extent_item);
627 			btrfs_set_file_extent_generation(leaf, fi,
628 							 trans->transid);
629 			btrfs_set_file_extent_num_bytes(leaf, fi,
630 							extent_end - end);
631 			btrfs_set_file_extent_offset(leaf, fi,
632 						     end - orig_offset);
633 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
634 					    struct btrfs_file_extent_item);
635 			btrfs_set_file_extent_generation(leaf, fi,
636 							 trans->transid);
637 			btrfs_set_file_extent_num_bytes(leaf, fi,
638 							end - other_start);
639 			goto out;
640 		}
641 	}
642 
643 	if (start > key.offset && end == extent_end) {
644 		other_start = end;
645 		other_end = 0;
646 		if (extent_mergeable(leaf, path->slots[0] + 1,
647 				     ino, bytenr, orig_offset,
648 				     &other_start, &other_end)) {
649 			fi = btrfs_item_ptr(leaf, path->slots[0],
650 					    struct btrfs_file_extent_item);
651 			btrfs_set_file_extent_num_bytes(leaf, fi,
652 							start - key.offset);
653 			btrfs_set_file_extent_generation(leaf, fi,
654 							 trans->transid);
655 			path->slots[0]++;
656 			new_key.offset = start;
657 			btrfs_set_item_key_safe(trans, path, &new_key);
658 
659 			fi = btrfs_item_ptr(leaf, path->slots[0],
660 					    struct btrfs_file_extent_item);
661 			btrfs_set_file_extent_generation(leaf, fi,
662 							 trans->transid);
663 			btrfs_set_file_extent_num_bytes(leaf, fi,
664 							other_end - start);
665 			btrfs_set_file_extent_offset(leaf, fi,
666 						     start - orig_offset);
667 			goto out;
668 		}
669 	}
670 
671 	while (start > key.offset || end < extent_end) {
672 		if (key.offset == start)
673 			split = end;
674 
675 		new_key.offset = split;
676 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
677 		if (ret == -EAGAIN) {
678 			btrfs_release_path(path);
679 			goto again;
680 		}
681 		if (ret < 0) {
682 			btrfs_abort_transaction(trans, ret);
683 			goto out;
684 		}
685 
686 		leaf = path->nodes[0];
687 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
688 				    struct btrfs_file_extent_item);
689 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
690 		btrfs_set_file_extent_num_bytes(leaf, fi,
691 						split - key.offset);
692 
693 		fi = btrfs_item_ptr(leaf, path->slots[0],
694 				    struct btrfs_file_extent_item);
695 
696 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
697 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
698 		btrfs_set_file_extent_num_bytes(leaf, fi,
699 						extent_end - split);
700 
701 		ref.action = BTRFS_ADD_DELAYED_REF;
702 		ref.bytenr = bytenr;
703 		ref.num_bytes = num_bytes;
704 		ref.parent = 0;
705 		ref.owning_root = btrfs_root_id(root);
706 		ref.ref_root = btrfs_root_id(root);
707 		btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
708 		ret = btrfs_inc_extent_ref(trans, &ref);
709 		if (ret) {
710 			btrfs_abort_transaction(trans, ret);
711 			goto out;
712 		}
713 
714 		if (split == start) {
715 			key.offset = start;
716 		} else {
717 			if (start != key.offset) {
718 				ret = -EINVAL;
719 				btrfs_abort_transaction(trans, ret);
720 				goto out;
721 			}
722 			path->slots[0]--;
723 			extent_end = end;
724 		}
725 		recow = 1;
726 	}
727 
728 	other_start = end;
729 	other_end = 0;
730 
731 	ref.action = BTRFS_DROP_DELAYED_REF;
732 	ref.bytenr = bytenr;
733 	ref.num_bytes = num_bytes;
734 	ref.parent = 0;
735 	ref.owning_root = btrfs_root_id(root);
736 	ref.ref_root = btrfs_root_id(root);
737 	btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
738 	if (extent_mergeable(leaf, path->slots[0] + 1,
739 			     ino, bytenr, orig_offset,
740 			     &other_start, &other_end)) {
741 		if (recow) {
742 			btrfs_release_path(path);
743 			goto again;
744 		}
745 		extent_end = other_end;
746 		del_slot = path->slots[0] + 1;
747 		del_nr++;
748 		ret = btrfs_free_extent(trans, &ref);
749 		if (ret) {
750 			btrfs_abort_transaction(trans, ret);
751 			goto out;
752 		}
753 	}
754 	other_start = 0;
755 	other_end = start;
756 	if (extent_mergeable(leaf, path->slots[0] - 1,
757 			     ino, bytenr, orig_offset,
758 			     &other_start, &other_end)) {
759 		if (recow) {
760 			btrfs_release_path(path);
761 			goto again;
762 		}
763 		key.offset = other_start;
764 		del_slot = path->slots[0];
765 		del_nr++;
766 		ret = btrfs_free_extent(trans, &ref);
767 		if (ret) {
768 			btrfs_abort_transaction(trans, ret);
769 			goto out;
770 		}
771 	}
772 	if (del_nr == 0) {
773 		fi = btrfs_item_ptr(leaf, path->slots[0],
774 			   struct btrfs_file_extent_item);
775 		btrfs_set_file_extent_type(leaf, fi,
776 					   BTRFS_FILE_EXTENT_REG);
777 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
778 	} else {
779 		fi = btrfs_item_ptr(leaf, del_slot - 1,
780 			   struct btrfs_file_extent_item);
781 		btrfs_set_file_extent_type(leaf, fi,
782 					   BTRFS_FILE_EXTENT_REG);
783 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
784 		btrfs_set_file_extent_num_bytes(leaf, fi,
785 						extent_end - key.offset);
786 
787 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
788 		if (ret < 0) {
789 			btrfs_abort_transaction(trans, ret);
790 			goto out;
791 		}
792 	}
793 out:
794 	btrfs_free_path(path);
795 	return ret;
796 }
797 
798 /*
799  * On error return an unlocked folio and the error value
800  * On success return a locked folio and 0
801  */
802 static int prepare_uptodate_folio(struct inode *inode, struct folio *folio, u64 pos,
803 				  u64 len, bool force_uptodate)
804 {
805 	u64 clamp_start = max_t(u64, pos, folio_pos(folio));
806 	u64 clamp_end = min_t(u64, pos + len, folio_pos(folio) + folio_size(folio));
807 	int ret = 0;
808 
809 	if (folio_test_uptodate(folio))
810 		return 0;
811 
812 	if (!force_uptodate &&
813 	    IS_ALIGNED(clamp_start, PAGE_SIZE) &&
814 	    IS_ALIGNED(clamp_end, PAGE_SIZE))
815 		return 0;
816 
817 	ret = btrfs_read_folio(NULL, folio);
818 	if (ret)
819 		return ret;
820 	folio_lock(folio);
821 	if (!folio_test_uptodate(folio)) {
822 		folio_unlock(folio);
823 		return -EIO;
824 	}
825 
826 	/*
827 	 * Since btrfs_read_folio() will unlock the folio before it returns,
828 	 * there is a window where btrfs_release_folio() can be called to
829 	 * release the page.  Here we check both inode mapping and page
830 	 * private to make sure the page was not released.
831 	 *
832 	 * The private flag check is essential for subpage as we need to store
833 	 * extra bitmap using folio private.
834 	 */
835 	if (folio->mapping != inode->i_mapping || !folio_test_private(folio)) {
836 		folio_unlock(folio);
837 		return -EAGAIN;
838 	}
839 	return 0;
840 }
841 
842 static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
843 {
844 	gfp_t gfp;
845 
846 	gfp = btrfs_alloc_write_mask(inode->i_mapping);
847 	if (nowait) {
848 		gfp &= ~__GFP_DIRECT_RECLAIM;
849 		gfp |= GFP_NOWAIT;
850 	}
851 
852 	return gfp;
853 }
854 
855 /*
856  * Get folio into the page cache and lock it.
857  */
858 static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_ret,
859 				      loff_t pos, size_t write_bytes,
860 				      bool force_uptodate, bool nowait)
861 {
862 	unsigned long index = pos >> PAGE_SHIFT;
863 	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
864 	fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN);
865 	struct folio *folio;
866 	int ret = 0;
867 
868 again:
869 	folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags, mask);
870 	if (IS_ERR(folio)) {
871 		if (nowait)
872 			ret = -EAGAIN;
873 		else
874 			ret = PTR_ERR(folio);
875 		return ret;
876 	}
877 	folio_wait_writeback(folio);
878 	/* Only support page sized folio yet. */
879 	ASSERT(folio_order(folio) == 0);
880 	ret = set_folio_extent_mapped(folio);
881 	if (ret < 0) {
882 		folio_unlock(folio);
883 		folio_put(folio);
884 		return ret;
885 	}
886 	ret = prepare_uptodate_folio(inode, folio, pos, write_bytes, force_uptodate);
887 	if (ret) {
888 		/* The folio is already unlocked. */
889 		folio_put(folio);
890 		if (!nowait && ret == -EAGAIN) {
891 			ret = 0;
892 			goto again;
893 		}
894 		return ret;
895 	}
896 	*folio_ret = folio;
897 	return 0;
898 }
899 
900 /*
901  * Locks the extent and properly waits for data=ordered extents to finish
902  * before allowing the folios to be modified if need.
903  *
904  * Return:
905  * 1 - the extent is locked
906  * 0 - the extent is not locked, and everything is OK
907  * -EAGAIN - need to prepare the folios again
908  */
909 static noinline int
910 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct folio *folio,
911 				loff_t pos, size_t write_bytes,
912 				u64 *lockstart, u64 *lockend, bool nowait,
913 				struct extent_state **cached_state)
914 {
915 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
916 	u64 start_pos;
917 	u64 last_pos;
918 	int ret = 0;
919 
920 	start_pos = round_down(pos, fs_info->sectorsize);
921 	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
922 
923 	if (start_pos < inode->vfs_inode.i_size) {
924 		struct btrfs_ordered_extent *ordered;
925 
926 		if (nowait) {
927 			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
928 					     cached_state)) {
929 				folio_unlock(folio);
930 				folio_put(folio);
931 				return -EAGAIN;
932 			}
933 		} else {
934 			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
935 		}
936 
937 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
938 						     last_pos - start_pos + 1);
939 		if (ordered &&
940 		    ordered->file_offset + ordered->num_bytes > start_pos &&
941 		    ordered->file_offset <= last_pos) {
942 			unlock_extent(&inode->io_tree, start_pos, last_pos,
943 				      cached_state);
944 			folio_unlock(folio);
945 			folio_put(folio);
946 			btrfs_start_ordered_extent(ordered);
947 			btrfs_put_ordered_extent(ordered);
948 			return -EAGAIN;
949 		}
950 		if (ordered)
951 			btrfs_put_ordered_extent(ordered);
952 
953 		*lockstart = start_pos;
954 		*lockend = last_pos;
955 		ret = 1;
956 	}
957 
958 	/*
959 	 * We should be called after prepare_one_folio() which should have locked
960 	 * all pages in the range.
961 	 */
962 	WARN_ON(!folio_test_locked(folio));
963 
964 	return ret;
965 }
966 
967 /*
968  * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
969  *
970  * @pos:         File offset.
971  * @write_bytes: The length to write, will be updated to the nocow writeable
972  *               range.
973  *
974  * This function will flush ordered extents in the range to ensure proper
975  * nocow checks.
976  *
977  * Return:
978  * > 0          If we can nocow, and updates @write_bytes.
979  *  0           If we can't do a nocow write.
980  * -EAGAIN      If we can't do a nocow write because snapshoting of the inode's
981  *              root is in progress.
982  * < 0          If an error happened.
983  *
984  * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
985  */
986 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
987 			   size_t *write_bytes, bool nowait)
988 {
989 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
990 	struct btrfs_root *root = inode->root;
991 	struct extent_state *cached_state = NULL;
992 	u64 lockstart, lockend;
993 	u64 num_bytes;
994 	int ret;
995 
996 	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
997 		return 0;
998 
999 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
1000 		return -EAGAIN;
1001 
1002 	lockstart = round_down(pos, fs_info->sectorsize);
1003 	lockend = round_up(pos + *write_bytes,
1004 			   fs_info->sectorsize) - 1;
1005 	num_bytes = lockend - lockstart + 1;
1006 
1007 	if (nowait) {
1008 		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
1009 						  &cached_state)) {
1010 			btrfs_drew_write_unlock(&root->snapshot_lock);
1011 			return -EAGAIN;
1012 		}
1013 	} else {
1014 		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
1015 						   &cached_state);
1016 	}
1017 	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1018 			       NULL, nowait);
1019 	if (ret <= 0)
1020 		btrfs_drew_write_unlock(&root->snapshot_lock);
1021 	else
1022 		*write_bytes = min_t(size_t, *write_bytes ,
1023 				     num_bytes - pos + lockstart);
1024 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
1025 
1026 	return ret;
1027 }
1028 
1029 void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1030 {
1031 	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1032 }
1033 
1034 int btrfs_write_check(struct kiocb *iocb, size_t count)
1035 {
1036 	struct file *file = iocb->ki_filp;
1037 	struct inode *inode = file_inode(file);
1038 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1039 	loff_t pos = iocb->ki_pos;
1040 	int ret;
1041 	loff_t oldsize;
1042 	loff_t start_pos;
1043 
1044 	/*
1045 	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
1046 	 * prealloc flags, as without those flags we always have to COW. We will
1047 	 * later check if we can really COW into the target range (using
1048 	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
1049 	 */
1050 	if ((iocb->ki_flags & IOCB_NOWAIT) &&
1051 	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1052 		return -EAGAIN;
1053 
1054 	ret = file_remove_privs(file);
1055 	if (ret)
1056 		return ret;
1057 
1058 	/*
1059 	 * We reserve space for updating the inode when we reserve space for the
1060 	 * extent we are going to write, so we will enospc out there.  We don't
1061 	 * need to start yet another transaction to update the inode as we will
1062 	 * update the inode when we finish writing whatever data we write.
1063 	 */
1064 	if (!IS_NOCMTIME(inode)) {
1065 		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1066 		inode_inc_iversion(inode);
1067 	}
1068 
1069 	start_pos = round_down(pos, fs_info->sectorsize);
1070 	oldsize = i_size_read(inode);
1071 	if (start_pos > oldsize) {
1072 		/* Expand hole size to cover write data, preventing empty gap */
1073 		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1074 
1075 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1076 		if (ret)
1077 			return ret;
1078 	}
1079 
1080 	return 0;
1081 }
1082 
1083 ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
1084 {
1085 	struct file *file = iocb->ki_filp;
1086 	loff_t pos;
1087 	struct inode *inode = file_inode(file);
1088 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1089 	struct extent_changeset *data_reserved = NULL;
1090 	u64 release_bytes = 0;
1091 	u64 lockstart;
1092 	u64 lockend;
1093 	size_t num_written = 0;
1094 	ssize_t ret;
1095 	loff_t old_isize = i_size_read(inode);
1096 	unsigned int ilock_flags = 0;
1097 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
1098 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
1099 	bool only_release_metadata = false;
1100 
1101 	if (nowait)
1102 		ilock_flags |= BTRFS_ILOCK_TRY;
1103 
1104 	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
1105 	if (ret < 0)
1106 		return ret;
1107 
1108 	ret = generic_write_checks(iocb, i);
1109 	if (ret <= 0)
1110 		goto out;
1111 
1112 	ret = btrfs_write_check(iocb, ret);
1113 	if (ret < 0)
1114 		goto out;
1115 
1116 	pos = iocb->ki_pos;
1117 	while (iov_iter_count(i) > 0) {
1118 		struct extent_state *cached_state = NULL;
1119 		size_t offset = offset_in_page(pos);
1120 		size_t sector_offset;
1121 		size_t write_bytes = min(iov_iter_count(i), PAGE_SIZE - offset);
1122 		size_t reserve_bytes;
1123 		size_t copied;
1124 		size_t dirty_sectors;
1125 		size_t num_sectors;
1126 		struct folio *folio = NULL;
1127 		int extents_locked;
1128 		bool force_page_uptodate = false;
1129 
1130 		/*
1131 		 * Fault pages before locking them in prepare_one_folio()
1132 		 * to avoid recursive lock
1133 		 */
1134 		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
1135 			ret = -EFAULT;
1136 			break;
1137 		}
1138 
1139 		only_release_metadata = false;
1140 		sector_offset = pos & (fs_info->sectorsize - 1);
1141 
1142 		extent_changeset_release(data_reserved);
1143 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
1144 						  &data_reserved, pos,
1145 						  write_bytes, nowait);
1146 		if (ret < 0) {
1147 			int can_nocow;
1148 
1149 			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
1150 				ret = -EAGAIN;
1151 				break;
1152 			}
1153 
1154 			/*
1155 			 * If we don't have to COW at the offset, reserve
1156 			 * metadata only. write_bytes may get smaller than
1157 			 * requested here.
1158 			 */
1159 			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1160 							   &write_bytes, nowait);
1161 			if (can_nocow < 0)
1162 				ret = can_nocow;
1163 			if (can_nocow > 0)
1164 				ret = 0;
1165 			if (ret)
1166 				break;
1167 			only_release_metadata = true;
1168 		}
1169 
1170 		reserve_bytes = round_up(write_bytes + sector_offset,
1171 					 fs_info->sectorsize);
1172 		WARN_ON(reserve_bytes == 0);
1173 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1174 						      reserve_bytes,
1175 						      reserve_bytes, nowait);
1176 		if (ret) {
1177 			if (!only_release_metadata)
1178 				btrfs_free_reserved_data_space(BTRFS_I(inode),
1179 						data_reserved, pos,
1180 						write_bytes);
1181 			else
1182 				btrfs_check_nocow_unlock(BTRFS_I(inode));
1183 
1184 			if (nowait && ret == -ENOSPC)
1185 				ret = -EAGAIN;
1186 			break;
1187 		}
1188 
1189 		release_bytes = reserve_bytes;
1190 again:
1191 		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
1192 		if (ret) {
1193 			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1194 			break;
1195 		}
1196 
1197 		ret = prepare_one_folio(inode, &folio, pos, write_bytes,
1198 					force_page_uptodate, false);
1199 		if (ret) {
1200 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1201 						       reserve_bytes);
1202 			break;
1203 		}
1204 
1205 		extents_locked = lock_and_cleanup_extent_if_need(BTRFS_I(inode),
1206 						folio, pos, write_bytes, &lockstart,
1207 						&lockend, nowait, &cached_state);
1208 		if (extents_locked < 0) {
1209 			if (!nowait && extents_locked == -EAGAIN)
1210 				goto again;
1211 
1212 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1213 						       reserve_bytes);
1214 			ret = extents_locked;
1215 			break;
1216 		}
1217 
1218 		copied = copy_folio_from_iter_atomic(folio,
1219 				offset_in_folio(folio, pos), write_bytes, i);
1220 		flush_dcache_folio(folio);
1221 
1222 		/*
1223 		 * If we get a partial write, we can end up with partially
1224 		 * uptodate page. Although if sector size < page size we can
1225 		 * handle it, but if it's not sector aligned it can cause
1226 		 * a lot of complexity, so make sure they don't happen by
1227 		 * forcing retry this copy.
1228 		 */
1229 		if (unlikely(copied < write_bytes)) {
1230 			if (!folio_test_uptodate(folio)) {
1231 				iov_iter_revert(i, copied);
1232 				copied = 0;
1233 			}
1234 		}
1235 
1236 		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1237 		dirty_sectors = round_up(copied + sector_offset,
1238 					fs_info->sectorsize);
1239 		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1240 
1241 		if (copied == 0) {
1242 			force_page_uptodate = true;
1243 			dirty_sectors = 0;
1244 		} else {
1245 			force_page_uptodate = false;
1246 		}
1247 
1248 		if (num_sectors > dirty_sectors) {
1249 			/* release everything except the sectors we dirtied */
1250 			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1251 			if (only_release_metadata) {
1252 				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1253 							release_bytes, true);
1254 			} else {
1255 				u64 release_start = round_up(pos + copied,
1256 							     fs_info->sectorsize);
1257 				btrfs_delalloc_release_space(BTRFS_I(inode),
1258 						data_reserved, release_start,
1259 						release_bytes, true);
1260 			}
1261 		}
1262 
1263 		release_bytes = round_up(copied + sector_offset,
1264 					fs_info->sectorsize);
1265 
1266 		ret = btrfs_dirty_folio(BTRFS_I(inode), folio, pos, copied,
1267 					&cached_state, only_release_metadata);
1268 
1269 		/*
1270 		 * If we have not locked the extent range, because the range's
1271 		 * start offset is >= i_size, we might still have a non-NULL
1272 		 * cached extent state, acquired while marking the extent range
1273 		 * as delalloc through btrfs_dirty_page(). Therefore free any
1274 		 * possible cached extent state to avoid a memory leak.
1275 		 */
1276 		if (extents_locked)
1277 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
1278 				      lockend, &cached_state);
1279 		else
1280 			free_extent_state(cached_state);
1281 
1282 		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1283 		if (ret) {
1284 			btrfs_drop_folio(fs_info, folio, pos, copied);
1285 			break;
1286 		}
1287 
1288 		release_bytes = 0;
1289 		if (only_release_metadata)
1290 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1291 
1292 		btrfs_drop_folio(fs_info, folio, pos, copied);
1293 
1294 		cond_resched();
1295 
1296 		pos += copied;
1297 		num_written += copied;
1298 	}
1299 
1300 	if (release_bytes) {
1301 		if (only_release_metadata) {
1302 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1303 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1304 					release_bytes, true);
1305 		} else {
1306 			btrfs_delalloc_release_space(BTRFS_I(inode),
1307 					data_reserved,
1308 					round_down(pos, fs_info->sectorsize),
1309 					release_bytes, true);
1310 		}
1311 	}
1312 
1313 	extent_changeset_free(data_reserved);
1314 	if (num_written > 0) {
1315 		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1316 		iocb->ki_pos += num_written;
1317 	}
1318 out:
1319 	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1320 	return num_written ? num_written : ret;
1321 }
1322 
1323 static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
1324 			const struct btrfs_ioctl_encoded_io_args *encoded)
1325 {
1326 	struct file *file = iocb->ki_filp;
1327 	struct inode *inode = file_inode(file);
1328 	loff_t count;
1329 	ssize_t ret;
1330 
1331 	btrfs_inode_lock(BTRFS_I(inode), 0);
1332 	count = encoded->len;
1333 	ret = generic_write_checks_count(iocb, &count);
1334 	if (ret == 0 && count != encoded->len) {
1335 		/*
1336 		 * The write got truncated by generic_write_checks_count(). We
1337 		 * can't do a partial encoded write.
1338 		 */
1339 		ret = -EFBIG;
1340 	}
1341 	if (ret || encoded->len == 0)
1342 		goto out;
1343 
1344 	ret = btrfs_write_check(iocb, encoded->len);
1345 	if (ret < 0)
1346 		goto out;
1347 
1348 	ret = btrfs_do_encoded_write(iocb, from, encoded);
1349 out:
1350 	btrfs_inode_unlock(BTRFS_I(inode), 0);
1351 	return ret;
1352 }
1353 
1354 ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
1355 			    const struct btrfs_ioctl_encoded_io_args *encoded)
1356 {
1357 	struct file *file = iocb->ki_filp;
1358 	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
1359 	ssize_t num_written, num_sync;
1360 
1361 	/*
1362 	 * If the fs flips readonly due to some impossible error, although we
1363 	 * have opened a file as writable, we have to stop this write operation
1364 	 * to ensure consistency.
1365 	 */
1366 	if (BTRFS_FS_ERROR(inode->root->fs_info))
1367 		return -EROFS;
1368 
1369 	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
1370 		return -EOPNOTSUPP;
1371 
1372 	if (encoded) {
1373 		num_written = btrfs_encoded_write(iocb, from, encoded);
1374 		num_sync = encoded->len;
1375 	} else if (iocb->ki_flags & IOCB_DIRECT) {
1376 		num_written = btrfs_direct_write(iocb, from);
1377 		num_sync = num_written;
1378 	} else {
1379 		num_written = btrfs_buffered_write(iocb, from);
1380 		num_sync = num_written;
1381 	}
1382 
1383 	btrfs_set_inode_last_sub_trans(inode);
1384 
1385 	if (num_sync > 0) {
1386 		num_sync = generic_write_sync(iocb, num_sync);
1387 		if (num_sync < 0)
1388 			num_written = num_sync;
1389 	}
1390 
1391 	return num_written;
1392 }
1393 
1394 static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1395 {
1396 	return btrfs_do_write_iter(iocb, from, NULL);
1397 }
1398 
1399 int btrfs_release_file(struct inode *inode, struct file *filp)
1400 {
1401 	struct btrfs_file_private *private = filp->private_data;
1402 
1403 	if (private) {
1404 		kfree(private->filldir_buf);
1405 		free_extent_state(private->llseek_cached_state);
1406 		kfree(private);
1407 		filp->private_data = NULL;
1408 	}
1409 
1410 	/*
1411 	 * Set by setattr when we are about to truncate a file from a non-zero
1412 	 * size to a zero size.  This tries to flush down new bytes that may
1413 	 * have been written if the application were using truncate to replace
1414 	 * a file in place.
1415 	 */
1416 	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
1417 			       &BTRFS_I(inode)->runtime_flags))
1418 			filemap_flush(inode->i_mapping);
1419 	return 0;
1420 }
1421 
1422 static int start_ordered_ops(struct btrfs_inode *inode, loff_t start, loff_t end)
1423 {
1424 	int ret;
1425 	struct blk_plug plug;
1426 
1427 	/*
1428 	 * This is only called in fsync, which would do synchronous writes, so
1429 	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
1430 	 * multiple disks using raid profile, a large IO can be split to
1431 	 * several segments of stripe length (currently 64K).
1432 	 */
1433 	blk_start_plug(&plug);
1434 	ret = btrfs_fdatawrite_range(inode, start, end);
1435 	blk_finish_plug(&plug);
1436 
1437 	return ret;
1438 }
1439 
1440 static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
1441 {
1442 	struct btrfs_inode *inode = ctx->inode;
1443 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1444 
1445 	if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
1446 	    list_empty(&ctx->ordered_extents))
1447 		return true;
1448 
1449 	/*
1450 	 * If we are doing a fast fsync we can not bail out if the inode's
1451 	 * last_trans is <= then the last committed transaction, because we only
1452 	 * update the last_trans of the inode during ordered extent completion,
1453 	 * and for a fast fsync we don't wait for that, we only wait for the
1454 	 * writeback to complete.
1455 	 */
1456 	if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
1457 	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
1458 	     list_empty(&ctx->ordered_extents)))
1459 		return true;
1460 
1461 	return false;
1462 }
1463 
1464 /*
1465  * fsync call for both files and directories.  This logs the inode into
1466  * the tree log instead of forcing full commits whenever possible.
1467  *
1468  * It needs to call filemap_fdatawait so that all ordered extent updates are
1469  * in the metadata btree are up to date for copying to the log.
1470  *
1471  * It drops the inode mutex before doing the tree log commit.  This is an
1472  * important optimization for directories because holding the mutex prevents
1473  * new operations on the dir while we write to disk.
1474  */
1475 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1476 {
1477 	struct dentry *dentry = file_dentry(file);
1478 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
1479 	struct btrfs_root *root = inode->root;
1480 	struct btrfs_fs_info *fs_info = root->fs_info;
1481 	struct btrfs_trans_handle *trans;
1482 	struct btrfs_log_ctx ctx;
1483 	int ret = 0, err;
1484 	u64 len;
1485 	bool full_sync;
1486 	bool skip_ilock = false;
1487 
1488 	if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
1489 		skip_ilock = true;
1490 		current->journal_info = NULL;
1491 		btrfs_assert_inode_locked(inode);
1492 	}
1493 
1494 	trace_btrfs_sync_file(file, datasync);
1495 
1496 	btrfs_init_log_ctx(&ctx, inode);
1497 
1498 	/*
1499 	 * Always set the range to a full range, otherwise we can get into
1500 	 * several problems, from missing file extent items to represent holes
1501 	 * when not using the NO_HOLES feature, to log tree corruption due to
1502 	 * races between hole detection during logging and completion of ordered
1503 	 * extents outside the range, to missing checksums due to ordered extents
1504 	 * for which we flushed only a subset of their pages.
1505 	 */
1506 	start = 0;
1507 	end = LLONG_MAX;
1508 	len = (u64)LLONG_MAX + 1;
1509 
1510 	/*
1511 	 * We write the dirty pages in the range and wait until they complete
1512 	 * out of the ->i_mutex. If so, we can flush the dirty pages by
1513 	 * multi-task, and make the performance up.  See
1514 	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1515 	 */
1516 	ret = start_ordered_ops(inode, start, end);
1517 	if (ret)
1518 		goto out;
1519 
1520 	if (skip_ilock)
1521 		down_write(&inode->i_mmap_lock);
1522 	else
1523 		btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
1524 
1525 	atomic_inc(&root->log_batch);
1526 
1527 	/*
1528 	 * Before we acquired the inode's lock and the mmap lock, someone may
1529 	 * have dirtied more pages in the target range. We need to make sure
1530 	 * that writeback for any such pages does not start while we are logging
1531 	 * the inode, because if it does, any of the following might happen when
1532 	 * we are not doing a full inode sync:
1533 	 *
1534 	 * 1) We log an extent after its writeback finishes but before its
1535 	 *    checksums are added to the csum tree, leading to -EIO errors
1536 	 *    when attempting to read the extent after a log replay.
1537 	 *
1538 	 * 2) We can end up logging an extent before its writeback finishes.
1539 	 *    Therefore after the log replay we will have a file extent item
1540 	 *    pointing to an unwritten extent (and no data checksums as well).
1541 	 *
1542 	 * So trigger writeback for any eventual new dirty pages and then we
1543 	 * wait for all ordered extents to complete below.
1544 	 */
1545 	ret = start_ordered_ops(inode, start, end);
1546 	if (ret) {
1547 		if (skip_ilock)
1548 			up_write(&inode->i_mmap_lock);
1549 		else
1550 			btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
1551 		goto out;
1552 	}
1553 
1554 	/*
1555 	 * Always check for the full sync flag while holding the inode's lock,
1556 	 * to avoid races with other tasks. The flag must be either set all the
1557 	 * time during logging or always off all the time while logging.
1558 	 * We check the flag here after starting delalloc above, because when
1559 	 * running delalloc the full sync flag may be set if we need to drop
1560 	 * extra extent map ranges due to temporary memory allocation failures.
1561 	 */
1562 	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
1563 
1564 	/*
1565 	 * We have to do this here to avoid the priority inversion of waiting on
1566 	 * IO of a lower priority task while holding a transaction open.
1567 	 *
1568 	 * For a full fsync we wait for the ordered extents to complete while
1569 	 * for a fast fsync we wait just for writeback to complete, and then
1570 	 * attach the ordered extents to the transaction so that a transaction
1571 	 * commit waits for their completion, to avoid data loss if we fsync,
1572 	 * the current transaction commits before the ordered extents complete
1573 	 * and a power failure happens right after that.
1574 	 *
1575 	 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
1576 	 * logical address recorded in the ordered extent may change. We need
1577 	 * to wait for the IO to stabilize the logical address.
1578 	 */
1579 	if (full_sync || btrfs_is_zoned(fs_info)) {
1580 		ret = btrfs_wait_ordered_range(inode, start, len);
1581 		clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);
1582 	} else {
1583 		/*
1584 		 * Get our ordered extents as soon as possible to avoid doing
1585 		 * checksum lookups in the csum tree, and use instead the
1586 		 * checksums attached to the ordered extents.
1587 		 */
1588 		btrfs_get_ordered_extents_for_logging(inode, &ctx.ordered_extents);
1589 		ret = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, end);
1590 		if (ret)
1591 			goto out_release_extents;
1592 
1593 		/*
1594 		 * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after
1595 		 * starting and waiting for writeback, because for buffered IO
1596 		 * it may have been set during the end IO callback
1597 		 * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in
1598 		 * case an error happened and we need to wait for ordered
1599 		 * extents to complete so that any extent maps that point to
1600 		 * unwritten locations are dropped and we don't log them.
1601 		 */
1602 		if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags))
1603 			ret = btrfs_wait_ordered_range(inode, start, len);
1604 	}
1605 
1606 	if (ret)
1607 		goto out_release_extents;
1608 
1609 	atomic_inc(&root->log_batch);
1610 
1611 	if (skip_inode_logging(&ctx)) {
1612 		/*
1613 		 * We've had everything committed since the last time we were
1614 		 * modified so clear this flag in case it was set for whatever
1615 		 * reason, it's no longer relevant.
1616 		 */
1617 		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
1618 		/*
1619 		 * An ordered extent might have started before and completed
1620 		 * already with io errors, in which case the inode was not
1621 		 * updated and we end up here. So check the inode's mapping
1622 		 * for any errors that might have happened since we last
1623 		 * checked called fsync.
1624 		 */
1625 		ret = filemap_check_wb_err(inode->vfs_inode.i_mapping, file->f_wb_err);
1626 		goto out_release_extents;
1627 	}
1628 
1629 	btrfs_init_log_ctx_scratch_eb(&ctx);
1630 
1631 	/*
1632 	 * We use start here because we will need to wait on the IO to complete
1633 	 * in btrfs_sync_log, which could require joining a transaction (for
1634 	 * example checking cross references in the nocow path).  If we use join
1635 	 * here we could get into a situation where we're waiting on IO to
1636 	 * happen that is blocked on a transaction trying to commit.  With start
1637 	 * we inc the extwriter counter, so we wait for all extwriters to exit
1638 	 * before we start blocking joiners.  This comment is to keep somebody
1639 	 * from thinking they are super smart and changing this to
1640 	 * btrfs_join_transaction *cough*Josef*cough*.
1641 	 */
1642 	trans = btrfs_start_transaction(root, 0);
1643 	if (IS_ERR(trans)) {
1644 		ret = PTR_ERR(trans);
1645 		goto out_release_extents;
1646 	}
1647 	trans->in_fsync = true;
1648 
1649 	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
1650 	/*
1651 	 * Scratch eb no longer needed, release before syncing log or commit
1652 	 * transaction, to avoid holding unnecessary memory during such long
1653 	 * operations.
1654 	 */
1655 	if (ctx.scratch_eb) {
1656 		free_extent_buffer(ctx.scratch_eb);
1657 		ctx.scratch_eb = NULL;
1658 	}
1659 	btrfs_release_log_ctx_extents(&ctx);
1660 	if (ret < 0) {
1661 		/* Fallthrough and commit/free transaction. */
1662 		ret = BTRFS_LOG_FORCE_COMMIT;
1663 	}
1664 
1665 	/* we've logged all the items and now have a consistent
1666 	 * version of the file in the log.  It is possible that
1667 	 * someone will come in and modify the file, but that's
1668 	 * fine because the log is consistent on disk, and we
1669 	 * have references to all of the file's extents
1670 	 *
1671 	 * It is possible that someone will come in and log the
1672 	 * file again, but that will end up using the synchronization
1673 	 * inside btrfs_sync_log to keep things safe.
1674 	 */
1675 	if (skip_ilock)
1676 		up_write(&inode->i_mmap_lock);
1677 	else
1678 		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
1679 
1680 	if (ret == BTRFS_NO_LOG_SYNC) {
1681 		ret = btrfs_end_transaction(trans);
1682 		goto out;
1683 	}
1684 
1685 	/* We successfully logged the inode, attempt to sync the log. */
1686 	if (!ret) {
1687 		ret = btrfs_sync_log(trans, root, &ctx);
1688 		if (!ret) {
1689 			ret = btrfs_end_transaction(trans);
1690 			goto out;
1691 		}
1692 	}
1693 
1694 	/*
1695 	 * At this point we need to commit the transaction because we had
1696 	 * btrfs_need_log_full_commit() or some other error.
1697 	 *
1698 	 * If we didn't do a full sync we have to stop the trans handle, wait on
1699 	 * the ordered extents, start it again and commit the transaction.  If
1700 	 * we attempt to wait on the ordered extents here we could deadlock with
1701 	 * something like fallocate() that is holding the extent lock trying to
1702 	 * start a transaction while some other thread is trying to commit the
1703 	 * transaction while we (fsync) are currently holding the transaction
1704 	 * open.
1705 	 */
1706 	if (!full_sync) {
1707 		ret = btrfs_end_transaction(trans);
1708 		if (ret)
1709 			goto out;
1710 		ret = btrfs_wait_ordered_range(inode, start, len);
1711 		if (ret)
1712 			goto out;
1713 
1714 		/*
1715 		 * This is safe to use here because we're only interested in
1716 		 * making sure the transaction that had the ordered extents is
1717 		 * committed.  We aren't waiting on anything past this point,
1718 		 * we're purely getting the transaction and committing it.
1719 		 */
1720 		trans = btrfs_attach_transaction_barrier(root);
1721 		if (IS_ERR(trans)) {
1722 			ret = PTR_ERR(trans);
1723 
1724 			/*
1725 			 * We committed the transaction and there's no currently
1726 			 * running transaction, this means everything we care
1727 			 * about made it to disk and we are done.
1728 			 */
1729 			if (ret == -ENOENT)
1730 				ret = 0;
1731 			goto out;
1732 		}
1733 	}
1734 
1735 	ret = btrfs_commit_transaction(trans);
1736 out:
1737 	free_extent_buffer(ctx.scratch_eb);
1738 	ASSERT(list_empty(&ctx.list));
1739 	ASSERT(list_empty(&ctx.conflict_inodes));
1740 	err = file_check_and_advance_wb_err(file);
1741 	if (!ret)
1742 		ret = err;
1743 	return ret > 0 ? -EIO : ret;
1744 
1745 out_release_extents:
1746 	btrfs_release_log_ctx_extents(&ctx);
1747 	if (skip_ilock)
1748 		up_write(&inode->i_mmap_lock);
1749 	else
1750 		btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
1751 	goto out;
1752 }
1753 
1754 /*
1755  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
1756  * called from a page fault handler when a page is first dirtied. Hence we must
1757  * be careful to check for EOF conditions here. We set the page up correctly
1758  * for a written page which means we get ENOSPC checking when writing into
1759  * holes and correct delalloc and unwritten extent mapping on filesystems that
1760  * support these features.
1761  *
1762  * We are not allowed to take the i_mutex here so we have to play games to
1763  * protect against truncate races as the page could now be beyond EOF.  Because
1764  * truncate_setsize() writes the inode size before removing pages, once we have
1765  * the page lock we can determine safely if the page is beyond EOF. If it is not
1766  * beyond EOF, then the page is guaranteed safe against truncation until we
1767  * unlock the page.
1768  */
1769 static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
1770 {
1771 	struct page *page = vmf->page;
1772 	struct folio *folio = page_folio(page);
1773 	struct inode *inode = file_inode(vmf->vma->vm_file);
1774 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1775 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1776 	struct btrfs_ordered_extent *ordered;
1777 	struct extent_state *cached_state = NULL;
1778 	struct extent_changeset *data_reserved = NULL;
1779 	unsigned long zero_start;
1780 	loff_t size;
1781 	vm_fault_t ret;
1782 	int ret2;
1783 	int reserved = 0;
1784 	u64 reserved_space;
1785 	u64 page_start;
1786 	u64 page_end;
1787 	u64 end;
1788 
1789 	ASSERT(folio_order(folio) == 0);
1790 
1791 	reserved_space = PAGE_SIZE;
1792 
1793 	sb_start_pagefault(inode->i_sb);
1794 	page_start = folio_pos(folio);
1795 	page_end = page_start + folio_size(folio) - 1;
1796 	end = page_end;
1797 
1798 	/*
1799 	 * Reserving delalloc space after obtaining the page lock can lead to
1800 	 * deadlock. For example, if a dirty page is locked by this function
1801 	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
1802 	 * dirty page write out, then the btrfs_writepages() function could
1803 	 * end up waiting indefinitely to get a lock on the page currently
1804 	 * being processed by btrfs_page_mkwrite() function.
1805 	 */
1806 	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
1807 					    page_start, reserved_space);
1808 	if (!ret2) {
1809 		ret2 = file_update_time(vmf->vma->vm_file);
1810 		reserved = 1;
1811 	}
1812 	if (ret2) {
1813 		ret = vmf_error(ret2);
1814 		if (reserved)
1815 			goto out;
1816 		goto out_noreserve;
1817 	}
1818 
1819 	/* Make the VM retry the fault. */
1820 	ret = VM_FAULT_NOPAGE;
1821 again:
1822 	down_read(&BTRFS_I(inode)->i_mmap_lock);
1823 	folio_lock(folio);
1824 	size = i_size_read(inode);
1825 
1826 	if ((folio->mapping != inode->i_mapping) ||
1827 	    (page_start >= size)) {
1828 		/* Page got truncated out from underneath us. */
1829 		goto out_unlock;
1830 	}
1831 	folio_wait_writeback(folio);
1832 
1833 	lock_extent(io_tree, page_start, page_end, &cached_state);
1834 	ret2 = set_folio_extent_mapped(folio);
1835 	if (ret2 < 0) {
1836 		ret = vmf_error(ret2);
1837 		unlock_extent(io_tree, page_start, page_end, &cached_state);
1838 		goto out_unlock;
1839 	}
1840 
1841 	/*
1842 	 * We can't set the delalloc bits if there are pending ordered
1843 	 * extents.  Drop our locks and wait for them to finish.
1844 	 */
1845 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
1846 	if (ordered) {
1847 		unlock_extent(io_tree, page_start, page_end, &cached_state);
1848 		folio_unlock(folio);
1849 		up_read(&BTRFS_I(inode)->i_mmap_lock);
1850 		btrfs_start_ordered_extent(ordered);
1851 		btrfs_put_ordered_extent(ordered);
1852 		goto again;
1853 	}
1854 
1855 	if (folio->index == ((size - 1) >> PAGE_SHIFT)) {
1856 		reserved_space = round_up(size - page_start, fs_info->sectorsize);
1857 		if (reserved_space < PAGE_SIZE) {
1858 			end = page_start + reserved_space - 1;
1859 			btrfs_delalloc_release_space(BTRFS_I(inode),
1860 					data_reserved, page_start,
1861 					PAGE_SIZE - reserved_space, true);
1862 		}
1863 	}
1864 
1865 	/*
1866 	 * page_mkwrite gets called when the page is firstly dirtied after it's
1867 	 * faulted in, but write(2) could also dirty a page and set delalloc
1868 	 * bits, thus in this case for space account reason, we still need to
1869 	 * clear any delalloc bits within this page range since we have to
1870 	 * reserve data&meta space before lock_page() (see above comments).
1871 	 */
1872 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
1873 			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
1874 			  EXTENT_DEFRAG, &cached_state);
1875 
1876 	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
1877 					&cached_state);
1878 	if (ret2) {
1879 		unlock_extent(io_tree, page_start, page_end, &cached_state);
1880 		ret = VM_FAULT_SIGBUS;
1881 		goto out_unlock;
1882 	}
1883 
1884 	/* Page is wholly or partially inside EOF. */
1885 	if (page_start + folio_size(folio) > size)
1886 		zero_start = offset_in_folio(folio, size);
1887 	else
1888 		zero_start = PAGE_SIZE;
1889 
1890 	if (zero_start != PAGE_SIZE)
1891 		folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
1892 
1893 	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
1894 	btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
1895 	btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
1896 
1897 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
1898 
1899 	unlock_extent(io_tree, page_start, page_end, &cached_state);
1900 	up_read(&BTRFS_I(inode)->i_mmap_lock);
1901 
1902 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
1903 	sb_end_pagefault(inode->i_sb);
1904 	extent_changeset_free(data_reserved);
1905 	return VM_FAULT_LOCKED;
1906 
1907 out_unlock:
1908 	folio_unlock(folio);
1909 	up_read(&BTRFS_I(inode)->i_mmap_lock);
1910 out:
1911 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
1912 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
1913 				     reserved_space, (ret != 0));
1914 out_noreserve:
1915 	sb_end_pagefault(inode->i_sb);
1916 	extent_changeset_free(data_reserved);
1917 	return ret;
1918 }
1919 
1920 static const struct vm_operations_struct btrfs_file_vm_ops = {
1921 	.fault		= filemap_fault,
1922 	.map_pages	= filemap_map_pages,
1923 	.page_mkwrite	= btrfs_page_mkwrite,
1924 };
1925 
1926 static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
1927 {
1928 	struct address_space *mapping = filp->f_mapping;
1929 
1930 	if (!mapping->a_ops->read_folio)
1931 		return -ENOEXEC;
1932 
1933 	file_accessed(filp);
1934 	vma->vm_ops = &btrfs_file_vm_ops;
1935 
1936 	return 0;
1937 }
1938 
1939 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
1940 			  int slot, u64 start, u64 end)
1941 {
1942 	struct btrfs_file_extent_item *fi;
1943 	struct btrfs_key key;
1944 
1945 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1946 		return 0;
1947 
1948 	btrfs_item_key_to_cpu(leaf, &key, slot);
1949 	if (key.objectid != btrfs_ino(inode) ||
1950 	    key.type != BTRFS_EXTENT_DATA_KEY)
1951 		return 0;
1952 
1953 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1954 
1955 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
1956 		return 0;
1957 
1958 	if (btrfs_file_extent_disk_bytenr(leaf, fi))
1959 		return 0;
1960 
1961 	if (key.offset == end)
1962 		return 1;
1963 	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
1964 		return 1;
1965 	return 0;
1966 }
1967 
1968 static int fill_holes(struct btrfs_trans_handle *trans,
1969 		struct btrfs_inode *inode,
1970 		struct btrfs_path *path, u64 offset, u64 end)
1971 {
1972 	struct btrfs_fs_info *fs_info = trans->fs_info;
1973 	struct btrfs_root *root = inode->root;
1974 	struct extent_buffer *leaf;
1975 	struct btrfs_file_extent_item *fi;
1976 	struct extent_map *hole_em;
1977 	struct btrfs_key key;
1978 	int ret;
1979 
1980 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
1981 		goto out;
1982 
1983 	key.objectid = btrfs_ino(inode);
1984 	key.type = BTRFS_EXTENT_DATA_KEY;
1985 	key.offset = offset;
1986 
1987 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1988 	if (ret <= 0) {
1989 		/*
1990 		 * We should have dropped this offset, so if we find it then
1991 		 * something has gone horribly wrong.
1992 		 */
1993 		if (ret == 0)
1994 			ret = -EINVAL;
1995 		return ret;
1996 	}
1997 
1998 	leaf = path->nodes[0];
1999 	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2000 		u64 num_bytes;
2001 
2002 		path->slots[0]--;
2003 		fi = btrfs_item_ptr(leaf, path->slots[0],
2004 				    struct btrfs_file_extent_item);
2005 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2006 			end - offset;
2007 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2008 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2009 		btrfs_set_file_extent_offset(leaf, fi, 0);
2010 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2011 		goto out;
2012 	}
2013 
2014 	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2015 		u64 num_bytes;
2016 
2017 		key.offset = offset;
2018 		btrfs_set_item_key_safe(trans, path, &key);
2019 		fi = btrfs_item_ptr(leaf, path->slots[0],
2020 				    struct btrfs_file_extent_item);
2021 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2022 			offset;
2023 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2024 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2025 		btrfs_set_file_extent_offset(leaf, fi, 0);
2026 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2027 		goto out;
2028 	}
2029 	btrfs_release_path(path);
2030 
2031 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2032 				       end - offset);
2033 	if (ret)
2034 		return ret;
2035 
2036 out:
2037 	btrfs_release_path(path);
2038 
2039 	hole_em = alloc_extent_map();
2040 	if (!hole_em) {
2041 		btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2042 		btrfs_set_inode_full_sync(inode);
2043 	} else {
2044 		hole_em->start = offset;
2045 		hole_em->len = end - offset;
2046 		hole_em->ram_bytes = hole_em->len;
2047 
2048 		hole_em->disk_bytenr = EXTENT_MAP_HOLE;
2049 		hole_em->disk_num_bytes = 0;
2050 		hole_em->generation = trans->transid;
2051 
2052 		ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2053 		free_extent_map(hole_em);
2054 		if (ret)
2055 			btrfs_set_inode_full_sync(inode);
2056 	}
2057 
2058 	return 0;
2059 }
2060 
2061 /*
2062  * Find a hole extent on given inode and change start/len to the end of hole
2063  * extent.(hole/vacuum extent whose em->start <= start &&
2064  *	   em->start + em->len > start)
2065  * When a hole extent is found, return 1 and modify start/len.
2066  */
2067 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2068 {
2069 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2070 	struct extent_map *em;
2071 	int ret = 0;
2072 
2073 	em = btrfs_get_extent(inode, NULL,
2074 			      round_down(*start, fs_info->sectorsize),
2075 			      round_up(*len, fs_info->sectorsize));
2076 	if (IS_ERR(em))
2077 		return PTR_ERR(em);
2078 
2079 	/* Hole or vacuum extent(only exists in no-hole mode) */
2080 	if (em->disk_bytenr == EXTENT_MAP_HOLE) {
2081 		ret = 1;
2082 		*len = em->start + em->len > *start + *len ?
2083 		       0 : *start + *len - em->start - em->len;
2084 		*start = em->start + em->len;
2085 	}
2086 	free_extent_map(em);
2087 	return ret;
2088 }
2089 
2090 static void btrfs_punch_hole_lock_range(struct inode *inode,
2091 					const u64 lockstart,
2092 					const u64 lockend,
2093 					struct extent_state **cached_state)
2094 {
2095 	/*
2096 	 * For subpage case, if the range is not at page boundary, we could
2097 	 * have pages at the leading/tailing part of the range.
2098 	 * This could lead to dead loop since filemap_range_has_page()
2099 	 * will always return true.
2100 	 * So here we need to do extra page alignment for
2101 	 * filemap_range_has_page().
2102 	 */
2103 	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2104 	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2105 
2106 	while (1) {
2107 		truncate_pagecache_range(inode, lockstart, lockend);
2108 
2109 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2110 			    cached_state);
2111 		/*
2112 		 * We can't have ordered extents in the range, nor dirty/writeback
2113 		 * pages, because we have locked the inode's VFS lock in exclusive
2114 		 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2115 		 * we have flushed all delalloc in the range and we have waited
2116 		 * for any ordered extents in the range to complete.
2117 		 * We can race with anyone reading pages from this range, so after
2118 		 * locking the range check if we have pages in the range, and if
2119 		 * we do, unlock the range and retry.
2120 		 */
2121 		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2122 					    page_lockend))
2123 			break;
2124 
2125 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2126 			      cached_state);
2127 	}
2128 
2129 	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2130 }
2131 
2132 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2133 				     struct btrfs_inode *inode,
2134 				     struct btrfs_path *path,
2135 				     struct btrfs_replace_extent_info *extent_info,
2136 				     const u64 replace_len,
2137 				     const u64 bytes_to_drop)
2138 {
2139 	struct btrfs_fs_info *fs_info = trans->fs_info;
2140 	struct btrfs_root *root = inode->root;
2141 	struct btrfs_file_extent_item *extent;
2142 	struct extent_buffer *leaf;
2143 	struct btrfs_key key;
2144 	int slot;
2145 	int ret;
2146 
2147 	if (replace_len == 0)
2148 		return 0;
2149 
2150 	if (extent_info->disk_offset == 0 &&
2151 	    btrfs_fs_incompat(fs_info, NO_HOLES)) {
2152 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2153 		return 0;
2154 	}
2155 
2156 	key.objectid = btrfs_ino(inode);
2157 	key.type = BTRFS_EXTENT_DATA_KEY;
2158 	key.offset = extent_info->file_offset;
2159 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2160 				      sizeof(struct btrfs_file_extent_item));
2161 	if (ret)
2162 		return ret;
2163 	leaf = path->nodes[0];
2164 	slot = path->slots[0];
2165 	write_extent_buffer(leaf, extent_info->extent_buf,
2166 			    btrfs_item_ptr_offset(leaf, slot),
2167 			    sizeof(struct btrfs_file_extent_item));
2168 	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2169 	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2170 	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2171 	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2172 	if (extent_info->is_new_extent)
2173 		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2174 	btrfs_release_path(path);
2175 
2176 	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2177 						replace_len);
2178 	if (ret)
2179 		return ret;
2180 
2181 	/* If it's a hole, nothing more needs to be done. */
2182 	if (extent_info->disk_offset == 0) {
2183 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2184 		return 0;
2185 	}
2186 
2187 	btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2188 
2189 	if (extent_info->is_new_extent && extent_info->insertions == 0) {
2190 		key.objectid = extent_info->disk_offset;
2191 		key.type = BTRFS_EXTENT_ITEM_KEY;
2192 		key.offset = extent_info->disk_len;
2193 		ret = btrfs_alloc_reserved_file_extent(trans, root,
2194 						       btrfs_ino(inode),
2195 						       extent_info->file_offset,
2196 						       extent_info->qgroup_reserved,
2197 						       &key);
2198 	} else {
2199 		struct btrfs_ref ref = {
2200 			.action = BTRFS_ADD_DELAYED_REF,
2201 			.bytenr = extent_info->disk_offset,
2202 			.num_bytes = extent_info->disk_len,
2203 			.owning_root = btrfs_root_id(root),
2204 			.ref_root = btrfs_root_id(root),
2205 		};
2206 		u64 ref_offset;
2207 
2208 		ref_offset = extent_info->file_offset - extent_info->data_offset;
2209 		btrfs_init_data_ref(&ref, btrfs_ino(inode), ref_offset, 0, false);
2210 		ret = btrfs_inc_extent_ref(trans, &ref);
2211 	}
2212 
2213 	extent_info->insertions++;
2214 
2215 	return ret;
2216 }
2217 
2218 /*
2219  * The respective range must have been previously locked, as well as the inode.
2220  * The end offset is inclusive (last byte of the range).
2221  * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2222  * the file range with an extent.
2223  * When not punching a hole, we don't want to end up in a state where we dropped
2224  * extents without inserting a new one, so we must abort the transaction to avoid
2225  * a corruption.
2226  */
2227 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2228 			       struct btrfs_path *path, const u64 start,
2229 			       const u64 end,
2230 			       struct btrfs_replace_extent_info *extent_info,
2231 			       struct btrfs_trans_handle **trans_out)
2232 {
2233 	struct btrfs_drop_extents_args drop_args = { 0 };
2234 	struct btrfs_root *root = inode->root;
2235 	struct btrfs_fs_info *fs_info = root->fs_info;
2236 	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2237 	u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2238 	struct btrfs_trans_handle *trans = NULL;
2239 	struct btrfs_block_rsv *rsv;
2240 	unsigned int rsv_count;
2241 	u64 cur_offset;
2242 	u64 len = end - start;
2243 	int ret = 0;
2244 
2245 	if (end <= start)
2246 		return -EINVAL;
2247 
2248 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2249 	if (!rsv) {
2250 		ret = -ENOMEM;
2251 		goto out;
2252 	}
2253 	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2254 	rsv->failfast = true;
2255 
2256 	/*
2257 	 * 1 - update the inode
2258 	 * 1 - removing the extents in the range
2259 	 * 1 - adding the hole extent if no_holes isn't set or if we are
2260 	 *     replacing the range with a new extent
2261 	 */
2262 	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2263 		rsv_count = 3;
2264 	else
2265 		rsv_count = 2;
2266 
2267 	trans = btrfs_start_transaction(root, rsv_count);
2268 	if (IS_ERR(trans)) {
2269 		ret = PTR_ERR(trans);
2270 		trans = NULL;
2271 		goto out_free;
2272 	}
2273 
2274 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2275 				      min_size, false);
2276 	if (WARN_ON(ret))
2277 		goto out_trans;
2278 	trans->block_rsv = rsv;
2279 
2280 	cur_offset = start;
2281 	drop_args.path = path;
2282 	drop_args.end = end + 1;
2283 	drop_args.drop_cache = true;
2284 	while (cur_offset < end) {
2285 		drop_args.start = cur_offset;
2286 		ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2287 		/* If we are punching a hole decrement the inode's byte count */
2288 		if (!extent_info)
2289 			btrfs_update_inode_bytes(inode, 0,
2290 						 drop_args.bytes_found);
2291 		if (ret != -ENOSPC) {
2292 			/*
2293 			 * The only time we don't want to abort is if we are
2294 			 * attempting to clone a partial inline extent, in which
2295 			 * case we'll get EOPNOTSUPP.  However if we aren't
2296 			 * clone we need to abort no matter what, because if we
2297 			 * got EOPNOTSUPP via prealloc then we messed up and
2298 			 * need to abort.
2299 			 */
2300 			if (ret &&
2301 			    (ret != -EOPNOTSUPP ||
2302 			     (extent_info && extent_info->is_new_extent)))
2303 				btrfs_abort_transaction(trans, ret);
2304 			break;
2305 		}
2306 
2307 		trans->block_rsv = &fs_info->trans_block_rsv;
2308 
2309 		if (!extent_info && cur_offset < drop_args.drop_end &&
2310 		    cur_offset < ino_size) {
2311 			ret = fill_holes(trans, inode, path, cur_offset,
2312 					 drop_args.drop_end);
2313 			if (ret) {
2314 				/*
2315 				 * If we failed then we didn't insert our hole
2316 				 * entries for the area we dropped, so now the
2317 				 * fs is corrupted, so we must abort the
2318 				 * transaction.
2319 				 */
2320 				btrfs_abort_transaction(trans, ret);
2321 				break;
2322 			}
2323 		} else if (!extent_info && cur_offset < drop_args.drop_end) {
2324 			/*
2325 			 * We are past the i_size here, but since we didn't
2326 			 * insert holes we need to clear the mapped area so we
2327 			 * know to not set disk_i_size in this area until a new
2328 			 * file extent is inserted here.
2329 			 */
2330 			ret = btrfs_inode_clear_file_extent_range(inode,
2331 					cur_offset,
2332 					drop_args.drop_end - cur_offset);
2333 			if (ret) {
2334 				/*
2335 				 * We couldn't clear our area, so we could
2336 				 * presumably adjust up and corrupt the fs, so
2337 				 * we need to abort.
2338 				 */
2339 				btrfs_abort_transaction(trans, ret);
2340 				break;
2341 			}
2342 		}
2343 
2344 		if (extent_info &&
2345 		    drop_args.drop_end > extent_info->file_offset) {
2346 			u64 replace_len = drop_args.drop_end -
2347 					  extent_info->file_offset;
2348 
2349 			ret = btrfs_insert_replace_extent(trans, inode,	path,
2350 					extent_info, replace_len,
2351 					drop_args.bytes_found);
2352 			if (ret) {
2353 				btrfs_abort_transaction(trans, ret);
2354 				break;
2355 			}
2356 			extent_info->data_len -= replace_len;
2357 			extent_info->data_offset += replace_len;
2358 			extent_info->file_offset += replace_len;
2359 		}
2360 
2361 		/*
2362 		 * We are releasing our handle on the transaction, balance the
2363 		 * dirty pages of the btree inode and flush delayed items, and
2364 		 * then get a new transaction handle, which may now point to a
2365 		 * new transaction in case someone else may have committed the
2366 		 * transaction we used to replace/drop file extent items. So
2367 		 * bump the inode's iversion and update mtime and ctime except
2368 		 * if we are called from a dedupe context. This is because a
2369 		 * power failure/crash may happen after the transaction is
2370 		 * committed and before we finish replacing/dropping all the
2371 		 * file extent items we need.
2372 		 */
2373 		inode_inc_iversion(&inode->vfs_inode);
2374 
2375 		if (!extent_info || extent_info->update_times)
2376 			inode_set_mtime_to_ts(&inode->vfs_inode,
2377 					      inode_set_ctime_current(&inode->vfs_inode));
2378 
2379 		ret = btrfs_update_inode(trans, inode);
2380 		if (ret)
2381 			break;
2382 
2383 		btrfs_end_transaction(trans);
2384 		btrfs_btree_balance_dirty(fs_info);
2385 
2386 		trans = btrfs_start_transaction(root, rsv_count);
2387 		if (IS_ERR(trans)) {
2388 			ret = PTR_ERR(trans);
2389 			trans = NULL;
2390 			break;
2391 		}
2392 
2393 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2394 					      rsv, min_size, false);
2395 		if (WARN_ON(ret))
2396 			break;
2397 		trans->block_rsv = rsv;
2398 
2399 		cur_offset = drop_args.drop_end;
2400 		len = end - cur_offset;
2401 		if (!extent_info && len) {
2402 			ret = find_first_non_hole(inode, &cur_offset, &len);
2403 			if (unlikely(ret < 0))
2404 				break;
2405 			if (ret && !len) {
2406 				ret = 0;
2407 				break;
2408 			}
2409 		}
2410 	}
2411 
2412 	/*
2413 	 * If we were cloning, force the next fsync to be a full one since we
2414 	 * we replaced (or just dropped in the case of cloning holes when
2415 	 * NO_HOLES is enabled) file extent items and did not setup new extent
2416 	 * maps for the replacement extents (or holes).
2417 	 */
2418 	if (extent_info && !extent_info->is_new_extent)
2419 		btrfs_set_inode_full_sync(inode);
2420 
2421 	if (ret)
2422 		goto out_trans;
2423 
2424 	trans->block_rsv = &fs_info->trans_block_rsv;
2425 	/*
2426 	 * If we are using the NO_HOLES feature we might have had already an
2427 	 * hole that overlaps a part of the region [lockstart, lockend] and
2428 	 * ends at (or beyond) lockend. Since we have no file extent items to
2429 	 * represent holes, drop_end can be less than lockend and so we must
2430 	 * make sure we have an extent map representing the existing hole (the
2431 	 * call to __btrfs_drop_extents() might have dropped the existing extent
2432 	 * map representing the existing hole), otherwise the fast fsync path
2433 	 * will not record the existence of the hole region
2434 	 * [existing_hole_start, lockend].
2435 	 */
2436 	if (drop_args.drop_end <= end)
2437 		drop_args.drop_end = end + 1;
2438 	/*
2439 	 * Don't insert file hole extent item if it's for a range beyond eof
2440 	 * (because it's useless) or if it represents a 0 bytes range (when
2441 	 * cur_offset == drop_end).
2442 	 */
2443 	if (!extent_info && cur_offset < ino_size &&
2444 	    cur_offset < drop_args.drop_end) {
2445 		ret = fill_holes(trans, inode, path, cur_offset,
2446 				 drop_args.drop_end);
2447 		if (ret) {
2448 			/* Same comment as above. */
2449 			btrfs_abort_transaction(trans, ret);
2450 			goto out_trans;
2451 		}
2452 	} else if (!extent_info && cur_offset < drop_args.drop_end) {
2453 		/* See the comment in the loop above for the reasoning here. */
2454 		ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2455 					drop_args.drop_end - cur_offset);
2456 		if (ret) {
2457 			btrfs_abort_transaction(trans, ret);
2458 			goto out_trans;
2459 		}
2460 
2461 	}
2462 	if (extent_info) {
2463 		ret = btrfs_insert_replace_extent(trans, inode, path,
2464 				extent_info, extent_info->data_len,
2465 				drop_args.bytes_found);
2466 		if (ret) {
2467 			btrfs_abort_transaction(trans, ret);
2468 			goto out_trans;
2469 		}
2470 	}
2471 
2472 out_trans:
2473 	if (!trans)
2474 		goto out_free;
2475 
2476 	trans->block_rsv = &fs_info->trans_block_rsv;
2477 	if (ret)
2478 		btrfs_end_transaction(trans);
2479 	else
2480 		*trans_out = trans;
2481 out_free:
2482 	btrfs_free_block_rsv(fs_info, rsv);
2483 out:
2484 	return ret;
2485 }
2486 
2487 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2488 {
2489 	struct inode *inode = file_inode(file);
2490 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2491 	struct btrfs_root *root = BTRFS_I(inode)->root;
2492 	struct extent_state *cached_state = NULL;
2493 	struct btrfs_path *path;
2494 	struct btrfs_trans_handle *trans = NULL;
2495 	u64 lockstart;
2496 	u64 lockend;
2497 	u64 tail_start;
2498 	u64 tail_len;
2499 	u64 orig_start = offset;
2500 	int ret = 0;
2501 	bool same_block;
2502 	u64 ino_size;
2503 	bool truncated_block = false;
2504 	bool updated_inode = false;
2505 
2506 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2507 
2508 	ret = btrfs_wait_ordered_range(BTRFS_I(inode), offset, len);
2509 	if (ret)
2510 		goto out_only_mutex;
2511 
2512 	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2513 	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2514 	if (ret < 0)
2515 		goto out_only_mutex;
2516 	if (ret && !len) {
2517 		/* Already in a large hole */
2518 		ret = 0;
2519 		goto out_only_mutex;
2520 	}
2521 
2522 	ret = file_modified(file);
2523 	if (ret)
2524 		goto out_only_mutex;
2525 
2526 	lockstart = round_up(offset, fs_info->sectorsize);
2527 	lockend = round_down(offset + len, fs_info->sectorsize) - 1;
2528 	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2529 		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2530 	/*
2531 	 * We needn't truncate any block which is beyond the end of the file
2532 	 * because we are sure there is no data there.
2533 	 */
2534 	/*
2535 	 * Only do this if we are in the same block and we aren't doing the
2536 	 * entire block.
2537 	 */
2538 	if (same_block && len < fs_info->sectorsize) {
2539 		if (offset < ino_size) {
2540 			truncated_block = true;
2541 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2542 						   0);
2543 		} else {
2544 			ret = 0;
2545 		}
2546 		goto out_only_mutex;
2547 	}
2548 
2549 	/* zero back part of the first block */
2550 	if (offset < ino_size) {
2551 		truncated_block = true;
2552 		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2553 		if (ret) {
2554 			btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2555 			return ret;
2556 		}
2557 	}
2558 
2559 	/* Check the aligned pages after the first unaligned page,
2560 	 * if offset != orig_start, which means the first unaligned page
2561 	 * including several following pages are already in holes,
2562 	 * the extra check can be skipped */
2563 	if (offset == orig_start) {
2564 		/* after truncate page, check hole again */
2565 		len = offset + len - lockstart;
2566 		offset = lockstart;
2567 		ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2568 		if (ret < 0)
2569 			goto out_only_mutex;
2570 		if (ret && !len) {
2571 			ret = 0;
2572 			goto out_only_mutex;
2573 		}
2574 		lockstart = offset;
2575 	}
2576 
2577 	/* Check the tail unaligned part is in a hole */
2578 	tail_start = lockend + 1;
2579 	tail_len = offset + len - tail_start;
2580 	if (tail_len) {
2581 		ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2582 		if (unlikely(ret < 0))
2583 			goto out_only_mutex;
2584 		if (!ret) {
2585 			/* zero the front end of the last page */
2586 			if (tail_start + tail_len < ino_size) {
2587 				truncated_block = true;
2588 				ret = btrfs_truncate_block(BTRFS_I(inode),
2589 							tail_start + tail_len,
2590 							0, 1);
2591 				if (ret)
2592 					goto out_only_mutex;
2593 			}
2594 		}
2595 	}
2596 
2597 	if (lockend < lockstart) {
2598 		ret = 0;
2599 		goto out_only_mutex;
2600 	}
2601 
2602 	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2603 
2604 	path = btrfs_alloc_path();
2605 	if (!path) {
2606 		ret = -ENOMEM;
2607 		goto out;
2608 	}
2609 
2610 	ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2611 					 lockend, NULL, &trans);
2612 	btrfs_free_path(path);
2613 	if (ret)
2614 		goto out;
2615 
2616 	ASSERT(trans != NULL);
2617 	inode_inc_iversion(inode);
2618 	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2619 	ret = btrfs_update_inode(trans, BTRFS_I(inode));
2620 	updated_inode = true;
2621 	btrfs_end_transaction(trans);
2622 	btrfs_btree_balance_dirty(fs_info);
2623 out:
2624 	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2625 		      &cached_state);
2626 out_only_mutex:
2627 	if (!updated_inode && truncated_block && !ret) {
2628 		/*
2629 		 * If we only end up zeroing part of a page, we still need to
2630 		 * update the inode item, so that all the time fields are
2631 		 * updated as well as the necessary btrfs inode in memory fields
2632 		 * for detecting, at fsync time, if the inode isn't yet in the
2633 		 * log tree or it's there but not up to date.
2634 		 */
2635 		struct timespec64 now = inode_set_ctime_current(inode);
2636 
2637 		inode_inc_iversion(inode);
2638 		inode_set_mtime_to_ts(inode, now);
2639 		trans = btrfs_start_transaction(root, 1);
2640 		if (IS_ERR(trans)) {
2641 			ret = PTR_ERR(trans);
2642 		} else {
2643 			int ret2;
2644 
2645 			ret = btrfs_update_inode(trans, BTRFS_I(inode));
2646 			ret2 = btrfs_end_transaction(trans);
2647 			if (!ret)
2648 				ret = ret2;
2649 		}
2650 	}
2651 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2652 	return ret;
2653 }
2654 
2655 /* Helper structure to record which range is already reserved */
2656 struct falloc_range {
2657 	struct list_head list;
2658 	u64 start;
2659 	u64 len;
2660 };
2661 
2662 /*
2663  * Helper function to add falloc range
2664  *
2665  * Caller should have locked the larger range of extent containing
2666  * [start, len)
2667  */
2668 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2669 {
2670 	struct falloc_range *range = NULL;
2671 
2672 	if (!list_empty(head)) {
2673 		/*
2674 		 * As fallocate iterates by bytenr order, we only need to check
2675 		 * the last range.
2676 		 */
2677 		range = list_last_entry(head, struct falloc_range, list);
2678 		if (range->start + range->len == start) {
2679 			range->len += len;
2680 			return 0;
2681 		}
2682 	}
2683 
2684 	range = kmalloc(sizeof(*range), GFP_KERNEL);
2685 	if (!range)
2686 		return -ENOMEM;
2687 	range->start = start;
2688 	range->len = len;
2689 	list_add_tail(&range->list, head);
2690 	return 0;
2691 }
2692 
2693 static int btrfs_fallocate_update_isize(struct inode *inode,
2694 					const u64 end,
2695 					const int mode)
2696 {
2697 	struct btrfs_trans_handle *trans;
2698 	struct btrfs_root *root = BTRFS_I(inode)->root;
2699 	int ret;
2700 	int ret2;
2701 
2702 	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2703 		return 0;
2704 
2705 	trans = btrfs_start_transaction(root, 1);
2706 	if (IS_ERR(trans))
2707 		return PTR_ERR(trans);
2708 
2709 	inode_set_ctime_current(inode);
2710 	i_size_write(inode, end);
2711 	btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
2712 	ret = btrfs_update_inode(trans, BTRFS_I(inode));
2713 	ret2 = btrfs_end_transaction(trans);
2714 
2715 	return ret ? ret : ret2;
2716 }
2717 
2718 enum {
2719 	RANGE_BOUNDARY_WRITTEN_EXTENT,
2720 	RANGE_BOUNDARY_PREALLOC_EXTENT,
2721 	RANGE_BOUNDARY_HOLE,
2722 };
2723 
2724 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
2725 						 u64 offset)
2726 {
2727 	const u64 sectorsize = inode->root->fs_info->sectorsize;
2728 	struct extent_map *em;
2729 	int ret;
2730 
2731 	offset = round_down(offset, sectorsize);
2732 	em = btrfs_get_extent(inode, NULL, offset, sectorsize);
2733 	if (IS_ERR(em))
2734 		return PTR_ERR(em);
2735 
2736 	if (em->disk_bytenr == EXTENT_MAP_HOLE)
2737 		ret = RANGE_BOUNDARY_HOLE;
2738 	else if (em->flags & EXTENT_FLAG_PREALLOC)
2739 		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2740 	else
2741 		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2742 
2743 	free_extent_map(em);
2744 	return ret;
2745 }
2746 
2747 static int btrfs_zero_range(struct inode *inode,
2748 			    loff_t offset,
2749 			    loff_t len,
2750 			    const int mode)
2751 {
2752 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2753 	struct extent_map *em;
2754 	struct extent_changeset *data_reserved = NULL;
2755 	int ret;
2756 	u64 alloc_hint = 0;
2757 	const u64 sectorsize = fs_info->sectorsize;
2758 	u64 alloc_start = round_down(offset, sectorsize);
2759 	u64 alloc_end = round_up(offset + len, sectorsize);
2760 	u64 bytes_to_reserve = 0;
2761 	bool space_reserved = false;
2762 
2763 	em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start,
2764 			      alloc_end - alloc_start);
2765 	if (IS_ERR(em)) {
2766 		ret = PTR_ERR(em);
2767 		goto out;
2768 	}
2769 
2770 	/*
2771 	 * Avoid hole punching and extent allocation for some cases. More cases
2772 	 * could be considered, but these are unlikely common and we keep things
2773 	 * as simple as possible for now. Also, intentionally, if the target
2774 	 * range contains one or more prealloc extents together with regular
2775 	 * extents and holes, we drop all the existing extents and allocate a
2776 	 * new prealloc extent, so that we get a larger contiguous disk extent.
2777 	 */
2778 	if (em->start <= alloc_start && (em->flags & EXTENT_FLAG_PREALLOC)) {
2779 		const u64 em_end = em->start + em->len;
2780 
2781 		if (em_end >= offset + len) {
2782 			/*
2783 			 * The whole range is already a prealloc extent,
2784 			 * do nothing except updating the inode's i_size if
2785 			 * needed.
2786 			 */
2787 			free_extent_map(em);
2788 			ret = btrfs_fallocate_update_isize(inode, offset + len,
2789 							   mode);
2790 			goto out;
2791 		}
2792 		/*
2793 		 * Part of the range is already a prealloc extent, so operate
2794 		 * only on the remaining part of the range.
2795 		 */
2796 		alloc_start = em_end;
2797 		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2798 		len = offset + len - alloc_start;
2799 		offset = alloc_start;
2800 		alloc_hint = extent_map_block_start(em) + em->len;
2801 	}
2802 	free_extent_map(em);
2803 
2804 	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2805 	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2806 		em = btrfs_get_extent(BTRFS_I(inode), NULL, alloc_start, sectorsize);
2807 		if (IS_ERR(em)) {
2808 			ret = PTR_ERR(em);
2809 			goto out;
2810 		}
2811 
2812 		if (em->flags & EXTENT_FLAG_PREALLOC) {
2813 			free_extent_map(em);
2814 			ret = btrfs_fallocate_update_isize(inode, offset + len,
2815 							   mode);
2816 			goto out;
2817 		}
2818 		if (len < sectorsize && em->disk_bytenr != EXTENT_MAP_HOLE) {
2819 			free_extent_map(em);
2820 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2821 						   0);
2822 			if (!ret)
2823 				ret = btrfs_fallocate_update_isize(inode,
2824 								   offset + len,
2825 								   mode);
2826 			return ret;
2827 		}
2828 		free_extent_map(em);
2829 		alloc_start = round_down(offset, sectorsize);
2830 		alloc_end = alloc_start + sectorsize;
2831 		goto reserve_space;
2832 	}
2833 
2834 	alloc_start = round_up(offset, sectorsize);
2835 	alloc_end = round_down(offset + len, sectorsize);
2836 
2837 	/*
2838 	 * For unaligned ranges, check the pages at the boundaries, they might
2839 	 * map to an extent, in which case we need to partially zero them, or
2840 	 * they might map to a hole, in which case we need our allocation range
2841 	 * to cover them.
2842 	 */
2843 	if (!IS_ALIGNED(offset, sectorsize)) {
2844 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2845 							    offset);
2846 		if (ret < 0)
2847 			goto out;
2848 		if (ret == RANGE_BOUNDARY_HOLE) {
2849 			alloc_start = round_down(offset, sectorsize);
2850 			ret = 0;
2851 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2852 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2853 			if (ret)
2854 				goto out;
2855 		} else {
2856 			ret = 0;
2857 		}
2858 	}
2859 
2860 	if (!IS_ALIGNED(offset + len, sectorsize)) {
2861 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2862 							    offset + len);
2863 		if (ret < 0)
2864 			goto out;
2865 		if (ret == RANGE_BOUNDARY_HOLE) {
2866 			alloc_end = round_up(offset + len, sectorsize);
2867 			ret = 0;
2868 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2869 			ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
2870 						   0, 1);
2871 			if (ret)
2872 				goto out;
2873 		} else {
2874 			ret = 0;
2875 		}
2876 	}
2877 
2878 reserve_space:
2879 	if (alloc_start < alloc_end) {
2880 		struct extent_state *cached_state = NULL;
2881 		const u64 lockstart = alloc_start;
2882 		const u64 lockend = alloc_end - 1;
2883 
2884 		bytes_to_reserve = alloc_end - alloc_start;
2885 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
2886 						      bytes_to_reserve);
2887 		if (ret < 0)
2888 			goto out;
2889 		space_reserved = true;
2890 		btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2891 					    &cached_state);
2892 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
2893 						alloc_start, bytes_to_reserve);
2894 		if (ret) {
2895 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
2896 				      lockend, &cached_state);
2897 			goto out;
2898 		}
2899 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
2900 						alloc_end - alloc_start,
2901 						fs_info->sectorsize,
2902 						offset + len, &alloc_hint);
2903 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2904 			      &cached_state);
2905 		/* btrfs_prealloc_file_range releases reserved space on error */
2906 		if (ret) {
2907 			space_reserved = false;
2908 			goto out;
2909 		}
2910 	}
2911 	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
2912  out:
2913 	if (ret && space_reserved)
2914 		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
2915 					       alloc_start, bytes_to_reserve);
2916 	extent_changeset_free(data_reserved);
2917 
2918 	return ret;
2919 }
2920 
2921 static long btrfs_fallocate(struct file *file, int mode,
2922 			    loff_t offset, loff_t len)
2923 {
2924 	struct inode *inode = file_inode(file);
2925 	struct extent_state *cached_state = NULL;
2926 	struct extent_changeset *data_reserved = NULL;
2927 	struct falloc_range *range;
2928 	struct falloc_range *tmp;
2929 	LIST_HEAD(reserve_list);
2930 	u64 cur_offset;
2931 	u64 last_byte;
2932 	u64 alloc_start;
2933 	u64 alloc_end;
2934 	u64 alloc_hint = 0;
2935 	u64 locked_end;
2936 	u64 actual_end = 0;
2937 	u64 data_space_needed = 0;
2938 	u64 data_space_reserved = 0;
2939 	u64 qgroup_reserved = 0;
2940 	struct extent_map *em;
2941 	int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
2942 	int ret;
2943 
2944 	/* Do not allow fallocate in ZONED mode */
2945 	if (btrfs_is_zoned(inode_to_fs_info(inode)))
2946 		return -EOPNOTSUPP;
2947 
2948 	alloc_start = round_down(offset, blocksize);
2949 	alloc_end = round_up(offset + len, blocksize);
2950 	cur_offset = alloc_start;
2951 
2952 	/* Make sure we aren't being give some crap mode */
2953 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
2954 		     FALLOC_FL_ZERO_RANGE))
2955 		return -EOPNOTSUPP;
2956 
2957 	if (mode & FALLOC_FL_PUNCH_HOLE)
2958 		return btrfs_punch_hole(file, offset, len);
2959 
2960 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2961 
2962 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
2963 		ret = inode_newsize_ok(inode, offset + len);
2964 		if (ret)
2965 			goto out;
2966 	}
2967 
2968 	ret = file_modified(file);
2969 	if (ret)
2970 		goto out;
2971 
2972 	/*
2973 	 * TODO: Move these two operations after we have checked
2974 	 * accurate reserved space, or fallocate can still fail but
2975 	 * with page truncated or size expanded.
2976 	 *
2977 	 * But that's a minor problem and won't do much harm BTW.
2978 	 */
2979 	if (alloc_start > inode->i_size) {
2980 		ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
2981 					alloc_start);
2982 		if (ret)
2983 			goto out;
2984 	} else if (offset + len > inode->i_size) {
2985 		/*
2986 		 * If we are fallocating from the end of the file onward we
2987 		 * need to zero out the end of the block if i_size lands in the
2988 		 * middle of a block.
2989 		 */
2990 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
2991 		if (ret)
2992 			goto out;
2993 	}
2994 
2995 	/*
2996 	 * We have locked the inode at the VFS level (in exclusive mode) and we
2997 	 * have locked the i_mmap_lock lock (in exclusive mode). Now before
2998 	 * locking the file range, flush all dealloc in the range and wait for
2999 	 * all ordered extents in the range to complete. After this we can lock
3000 	 * the file range and, due to the previous locking we did, we know there
3001 	 * can't be more delalloc or ordered extents in the range.
3002 	 */
3003 	ret = btrfs_wait_ordered_range(BTRFS_I(inode), alloc_start,
3004 				       alloc_end - alloc_start);
3005 	if (ret)
3006 		goto out;
3007 
3008 	if (mode & FALLOC_FL_ZERO_RANGE) {
3009 		ret = btrfs_zero_range(inode, offset, len, mode);
3010 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3011 		return ret;
3012 	}
3013 
3014 	locked_end = alloc_end - 1;
3015 	lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3016 		    &cached_state);
3017 
3018 	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3019 
3020 	/* First, check if we exceed the qgroup limit */
3021 	while (cur_offset < alloc_end) {
3022 		em = btrfs_get_extent(BTRFS_I(inode), NULL, cur_offset,
3023 				      alloc_end - cur_offset);
3024 		if (IS_ERR(em)) {
3025 			ret = PTR_ERR(em);
3026 			break;
3027 		}
3028 		last_byte = min(extent_map_end(em), alloc_end);
3029 		actual_end = min_t(u64, extent_map_end(em), offset + len);
3030 		last_byte = ALIGN(last_byte, blocksize);
3031 		if (em->disk_bytenr == EXTENT_MAP_HOLE ||
3032 		    (cur_offset >= inode->i_size &&
3033 		     !(em->flags & EXTENT_FLAG_PREALLOC))) {
3034 			const u64 range_len = last_byte - cur_offset;
3035 
3036 			ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3037 			if (ret < 0) {
3038 				free_extent_map(em);
3039 				break;
3040 			}
3041 			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3042 					&data_reserved, cur_offset, range_len);
3043 			if (ret < 0) {
3044 				free_extent_map(em);
3045 				break;
3046 			}
3047 			qgroup_reserved += range_len;
3048 			data_space_needed += range_len;
3049 		}
3050 		free_extent_map(em);
3051 		cur_offset = last_byte;
3052 	}
3053 
3054 	if (!ret && data_space_needed > 0) {
3055 		/*
3056 		 * We are safe to reserve space here as we can't have delalloc
3057 		 * in the range, see above.
3058 		 */
3059 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3060 						      data_space_needed);
3061 		if (!ret)
3062 			data_space_reserved = data_space_needed;
3063 	}
3064 
3065 	/*
3066 	 * If ret is still 0, means we're OK to fallocate.
3067 	 * Or just cleanup the list and exit.
3068 	 */
3069 	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3070 		if (!ret) {
3071 			ret = btrfs_prealloc_file_range(inode, mode,
3072 					range->start,
3073 					range->len, blocksize,
3074 					offset + len, &alloc_hint);
3075 			/*
3076 			 * btrfs_prealloc_file_range() releases space even
3077 			 * if it returns an error.
3078 			 */
3079 			data_space_reserved -= range->len;
3080 			qgroup_reserved -= range->len;
3081 		} else if (data_space_reserved > 0) {
3082 			btrfs_free_reserved_data_space(BTRFS_I(inode),
3083 					       data_reserved, range->start,
3084 					       range->len);
3085 			data_space_reserved -= range->len;
3086 			qgroup_reserved -= range->len;
3087 		} else if (qgroup_reserved > 0) {
3088 			btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3089 					       range->start, range->len, NULL);
3090 			qgroup_reserved -= range->len;
3091 		}
3092 		list_del(&range->list);
3093 		kfree(range);
3094 	}
3095 	if (ret < 0)
3096 		goto out_unlock;
3097 
3098 	/*
3099 	 * We didn't need to allocate any more space, but we still extended the
3100 	 * size of the file so we need to update i_size and the inode item.
3101 	 */
3102 	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3103 out_unlock:
3104 	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3105 		      &cached_state);
3106 out:
3107 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3108 	extent_changeset_free(data_reserved);
3109 	return ret;
3110 }
3111 
3112 /*
3113  * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3114  * that has unflushed and/or flushing delalloc. There might be other adjacent
3115  * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3116  * looping while it gets adjacent subranges, and merging them together.
3117  */
3118 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3119 				   struct extent_state **cached_state,
3120 				   bool *search_io_tree,
3121 				   u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3122 {
3123 	u64 len = end + 1 - start;
3124 	u64 delalloc_len = 0;
3125 	struct btrfs_ordered_extent *oe;
3126 	u64 oe_start;
3127 	u64 oe_end;
3128 
3129 	/*
3130 	 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3131 	 * means we have delalloc (dirty pages) for which writeback has not
3132 	 * started yet.
3133 	 */
3134 	if (*search_io_tree) {
3135 		spin_lock(&inode->lock);
3136 		if (inode->delalloc_bytes > 0) {
3137 			spin_unlock(&inode->lock);
3138 			*delalloc_start_ret = start;
3139 			delalloc_len = count_range_bits(&inode->io_tree,
3140 							delalloc_start_ret, end,
3141 							len, EXTENT_DELALLOC, 1,
3142 							cached_state);
3143 		} else {
3144 			spin_unlock(&inode->lock);
3145 		}
3146 	}
3147 
3148 	if (delalloc_len > 0) {
3149 		/*
3150 		 * If delalloc was found then *delalloc_start_ret has a sector size
3151 		 * aligned value (rounded down).
3152 		 */
3153 		*delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3154 
3155 		if (*delalloc_start_ret == start) {
3156 			/* Delalloc for the whole range, nothing more to do. */
3157 			if (*delalloc_end_ret == end)
3158 				return true;
3159 			/* Else trim our search range for ordered extents. */
3160 			start = *delalloc_end_ret + 1;
3161 			len = end + 1 - start;
3162 		}
3163 	} else {
3164 		/* No delalloc, future calls don't need to search again. */
3165 		*search_io_tree = false;
3166 	}
3167 
3168 	/*
3169 	 * Now also check if there's any ordered extent in the range.
3170 	 * We do this because:
3171 	 *
3172 	 * 1) When delalloc is flushed, the file range is locked, we clear the
3173 	 *    EXTENT_DELALLOC bit from the io tree and create an extent map and
3174 	 *    an ordered extent for the write. So we might just have been called
3175 	 *    after delalloc is flushed and before the ordered extent completes
3176 	 *    and inserts the new file extent item in the subvolume's btree;
3177 	 *
3178 	 * 2) We may have an ordered extent created by flushing delalloc for a
3179 	 *    subrange that starts before the subrange we found marked with
3180 	 *    EXTENT_DELALLOC in the io tree.
3181 	 *
3182 	 * We could also use the extent map tree to find such delalloc that is
3183 	 * being flushed, but using the ordered extents tree is more efficient
3184 	 * because it's usually much smaller as ordered extents are removed from
3185 	 * the tree once they complete. With the extent maps, we mau have them
3186 	 * in the extent map tree for a very long time, and they were either
3187 	 * created by previous writes or loaded by read operations.
3188 	 */
3189 	oe = btrfs_lookup_first_ordered_range(inode, start, len);
3190 	if (!oe)
3191 		return (delalloc_len > 0);
3192 
3193 	/* The ordered extent may span beyond our search range. */
3194 	oe_start = max(oe->file_offset, start);
3195 	oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
3196 
3197 	btrfs_put_ordered_extent(oe);
3198 
3199 	/* Don't have unflushed delalloc, return the ordered extent range. */
3200 	if (delalloc_len == 0) {
3201 		*delalloc_start_ret = oe_start;
3202 		*delalloc_end_ret = oe_end;
3203 		return true;
3204 	}
3205 
3206 	/*
3207 	 * We have both unflushed delalloc (io_tree) and an ordered extent.
3208 	 * If the ranges are adjacent returned a combined range, otherwise
3209 	 * return the leftmost range.
3210 	 */
3211 	if (oe_start < *delalloc_start_ret) {
3212 		if (oe_end < *delalloc_start_ret)
3213 			*delalloc_end_ret = oe_end;
3214 		*delalloc_start_ret = oe_start;
3215 	} else if (*delalloc_end_ret + 1 == oe_start) {
3216 		*delalloc_end_ret = oe_end;
3217 	}
3218 
3219 	return true;
3220 }
3221 
3222 /*
3223  * Check if there's delalloc in a given range.
3224  *
3225  * @inode:               The inode.
3226  * @start:               The start offset of the range. It does not need to be
3227  *                       sector size aligned.
3228  * @end:                 The end offset (inclusive value) of the search range.
3229  *                       It does not need to be sector size aligned.
3230  * @cached_state:        Extent state record used for speeding up delalloc
3231  *                       searches in the inode's io_tree. Can be NULL.
3232  * @delalloc_start_ret:  Output argument, set to the start offset of the
3233  *                       subrange found with delalloc (may not be sector size
3234  *                       aligned).
3235  * @delalloc_end_ret:    Output argument, set to he end offset (inclusive value)
3236  *                       of the subrange found with delalloc.
3237  *
3238  * Returns true if a subrange with delalloc is found within the given range, and
3239  * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3240  * end offsets of the subrange.
3241  */
3242 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3243 				  struct extent_state **cached_state,
3244 				  u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3245 {
3246 	u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3247 	u64 prev_delalloc_end = 0;
3248 	bool search_io_tree = true;
3249 	bool ret = false;
3250 
3251 	while (cur_offset <= end) {
3252 		u64 delalloc_start;
3253 		u64 delalloc_end;
3254 		bool delalloc;
3255 
3256 		delalloc = find_delalloc_subrange(inode, cur_offset, end,
3257 						  cached_state, &search_io_tree,
3258 						  &delalloc_start,
3259 						  &delalloc_end);
3260 		if (!delalloc)
3261 			break;
3262 
3263 		if (prev_delalloc_end == 0) {
3264 			/* First subrange found. */
3265 			*delalloc_start_ret = max(delalloc_start, start);
3266 			*delalloc_end_ret = delalloc_end;
3267 			ret = true;
3268 		} else if (delalloc_start == prev_delalloc_end + 1) {
3269 			/* Subrange adjacent to the previous one, merge them. */
3270 			*delalloc_end_ret = delalloc_end;
3271 		} else {
3272 			/* Subrange not adjacent to the previous one, exit. */
3273 			break;
3274 		}
3275 
3276 		prev_delalloc_end = delalloc_end;
3277 		cur_offset = delalloc_end + 1;
3278 		cond_resched();
3279 	}
3280 
3281 	return ret;
3282 }
3283 
3284 /*
3285  * Check if there's a hole or delalloc range in a range representing a hole (or
3286  * prealloc extent) found in the inode's subvolume btree.
3287  *
3288  * @inode:      The inode.
3289  * @whence:     Seek mode (SEEK_DATA or SEEK_HOLE).
3290  * @start:      Start offset of the hole region. It does not need to be sector
3291  *              size aligned.
3292  * @end:        End offset (inclusive value) of the hole region. It does not
3293  *              need to be sector size aligned.
3294  * @start_ret:  Return parameter, used to set the start of the subrange in the
3295  *              hole that matches the search criteria (seek mode), if such
3296  *              subrange is found (return value of the function is true).
3297  *              The value returned here may not be sector size aligned.
3298  *
3299  * Returns true if a subrange matching the given seek mode is found, and if one
3300  * is found, it updates @start_ret with the start of the subrange.
3301  */
3302 static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3303 					struct extent_state **cached_state,
3304 					u64 start, u64 end, u64 *start_ret)
3305 {
3306 	u64 delalloc_start;
3307 	u64 delalloc_end;
3308 	bool delalloc;
3309 
3310 	delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
3311 						&delalloc_start, &delalloc_end);
3312 	if (delalloc && whence == SEEK_DATA) {
3313 		*start_ret = delalloc_start;
3314 		return true;
3315 	}
3316 
3317 	if (delalloc && whence == SEEK_HOLE) {
3318 		/*
3319 		 * We found delalloc but it starts after out start offset. So we
3320 		 * have a hole between our start offset and the delalloc start.
3321 		 */
3322 		if (start < delalloc_start) {
3323 			*start_ret = start;
3324 			return true;
3325 		}
3326 		/*
3327 		 * Delalloc range starts at our start offset.
3328 		 * If the delalloc range's length is smaller than our range,
3329 		 * then it means we have a hole that starts where the delalloc
3330 		 * subrange ends.
3331 		 */
3332 		if (delalloc_end < end) {
3333 			*start_ret = delalloc_end + 1;
3334 			return true;
3335 		}
3336 
3337 		/* There's delalloc for the whole range. */
3338 		return false;
3339 	}
3340 
3341 	if (!delalloc && whence == SEEK_HOLE) {
3342 		*start_ret = start;
3343 		return true;
3344 	}
3345 
3346 	/*
3347 	 * No delalloc in the range and we are seeking for data. The caller has
3348 	 * to iterate to the next extent item in the subvolume btree.
3349 	 */
3350 	return false;
3351 }
3352 
3353 static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
3354 {
3355 	struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
3356 	struct btrfs_file_private *private;
3357 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3358 	struct extent_state *cached_state = NULL;
3359 	struct extent_state **delalloc_cached_state;
3360 	const loff_t i_size = i_size_read(&inode->vfs_inode);
3361 	const u64 ino = btrfs_ino(inode);
3362 	struct btrfs_root *root = inode->root;
3363 	struct btrfs_path *path;
3364 	struct btrfs_key key;
3365 	u64 last_extent_end;
3366 	u64 lockstart;
3367 	u64 lockend;
3368 	u64 start;
3369 	int ret;
3370 	bool found = false;
3371 
3372 	if (i_size == 0 || offset >= i_size)
3373 		return -ENXIO;
3374 
3375 	/*
3376 	 * Quick path. If the inode has no prealloc extents and its number of
3377 	 * bytes used matches its i_size, then it can not have holes.
3378 	 */
3379 	if (whence == SEEK_HOLE &&
3380 	    !(inode->flags & BTRFS_INODE_PREALLOC) &&
3381 	    inode_get_bytes(&inode->vfs_inode) == i_size)
3382 		return i_size;
3383 
3384 	spin_lock(&inode->lock);
3385 	private = file->private_data;
3386 	spin_unlock(&inode->lock);
3387 
3388 	if (private && private->owner_task != current) {
3389 		/*
3390 		 * Not allocated by us, don't use it as its cached state is used
3391 		 * by the task that allocated it and we don't want neither to
3392 		 * mess with it nor get incorrect results because it reflects an
3393 		 * invalid state for the current task.
3394 		 */
3395 		private = NULL;
3396 	} else if (!private) {
3397 		private = kzalloc(sizeof(*private), GFP_KERNEL);
3398 		/*
3399 		 * No worries if memory allocation failed.
3400 		 * The private structure is used only for speeding up multiple
3401 		 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
3402 		 * so everything will still be correct.
3403 		 */
3404 		if (private) {
3405 			bool free = false;
3406 
3407 			private->owner_task = current;
3408 
3409 			spin_lock(&inode->lock);
3410 			if (file->private_data)
3411 				free = true;
3412 			else
3413 				file->private_data = private;
3414 			spin_unlock(&inode->lock);
3415 
3416 			if (free) {
3417 				kfree(private);
3418 				private = NULL;
3419 			}
3420 		}
3421 	}
3422 
3423 	if (private)
3424 		delalloc_cached_state = &private->llseek_cached_state;
3425 	else
3426 		delalloc_cached_state = NULL;
3427 
3428 	/*
3429 	 * offset can be negative, in this case we start finding DATA/HOLE from
3430 	 * the very start of the file.
3431 	 */
3432 	start = max_t(loff_t, 0, offset);
3433 
3434 	lockstart = round_down(start, fs_info->sectorsize);
3435 	lockend = round_up(i_size, fs_info->sectorsize);
3436 	if (lockend <= lockstart)
3437 		lockend = lockstart + fs_info->sectorsize;
3438 	lockend--;
3439 
3440 	path = btrfs_alloc_path();
3441 	if (!path)
3442 		return -ENOMEM;
3443 	path->reada = READA_FORWARD;
3444 
3445 	key.objectid = ino;
3446 	key.type = BTRFS_EXTENT_DATA_KEY;
3447 	key.offset = start;
3448 
3449 	last_extent_end = lockstart;
3450 
3451 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3452 
3453 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3454 	if (ret < 0) {
3455 		goto out;
3456 	} else if (ret > 0 && path->slots[0] > 0) {
3457 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3458 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3459 			path->slots[0]--;
3460 	}
3461 
3462 	while (start < i_size) {
3463 		struct extent_buffer *leaf = path->nodes[0];
3464 		struct btrfs_file_extent_item *extent;
3465 		u64 extent_end;
3466 		u8 type;
3467 
3468 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3469 			ret = btrfs_next_leaf(root, path);
3470 			if (ret < 0)
3471 				goto out;
3472 			else if (ret > 0)
3473 				break;
3474 
3475 			leaf = path->nodes[0];
3476 		}
3477 
3478 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3479 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3480 			break;
3481 
3482 		extent_end = btrfs_file_extent_end(path);
3483 
3484 		/*
3485 		 * In the first iteration we may have a slot that points to an
3486 		 * extent that ends before our start offset, so skip it.
3487 		 */
3488 		if (extent_end <= start) {
3489 			path->slots[0]++;
3490 			continue;
3491 		}
3492 
3493 		/* We have an implicit hole, NO_HOLES feature is likely set. */
3494 		if (last_extent_end < key.offset) {
3495 			u64 search_start = last_extent_end;
3496 			u64 found_start;
3497 
3498 			/*
3499 			 * First iteration, @start matches @offset and it's
3500 			 * within the hole.
3501 			 */
3502 			if (start == offset)
3503 				search_start = offset;
3504 
3505 			found = find_desired_extent_in_hole(inode, whence,
3506 							    delalloc_cached_state,
3507 							    search_start,
3508 							    key.offset - 1,
3509 							    &found_start);
3510 			if (found) {
3511 				start = found_start;
3512 				break;
3513 			}
3514 			/*
3515 			 * Didn't find data or a hole (due to delalloc) in the
3516 			 * implicit hole range, so need to analyze the extent.
3517 			 */
3518 		}
3519 
3520 		extent = btrfs_item_ptr(leaf, path->slots[0],
3521 					struct btrfs_file_extent_item);
3522 		type = btrfs_file_extent_type(leaf, extent);
3523 
3524 		/*
3525 		 * Can't access the extent's disk_bytenr field if this is an
3526 		 * inline extent, since at that offset, it's where the extent
3527 		 * data starts.
3528 		 */
3529 		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
3530 		    (type == BTRFS_FILE_EXTENT_REG &&
3531 		     btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
3532 			/*
3533 			 * Explicit hole or prealloc extent, search for delalloc.
3534 			 * A prealloc extent is treated like a hole.
3535 			 */
3536 			u64 search_start = key.offset;
3537 			u64 found_start;
3538 
3539 			/*
3540 			 * First iteration, @start matches @offset and it's
3541 			 * within the hole.
3542 			 */
3543 			if (start == offset)
3544 				search_start = offset;
3545 
3546 			found = find_desired_extent_in_hole(inode, whence,
3547 							    delalloc_cached_state,
3548 							    search_start,
3549 							    extent_end - 1,
3550 							    &found_start);
3551 			if (found) {
3552 				start = found_start;
3553 				break;
3554 			}
3555 			/*
3556 			 * Didn't find data or a hole (due to delalloc) in the
3557 			 * implicit hole range, so need to analyze the next
3558 			 * extent item.
3559 			 */
3560 		} else {
3561 			/*
3562 			 * Found a regular or inline extent.
3563 			 * If we are seeking for data, adjust the start offset
3564 			 * and stop, we're done.
3565 			 */
3566 			if (whence == SEEK_DATA) {
3567 				start = max_t(u64, key.offset, offset);
3568 				found = true;
3569 				break;
3570 			}
3571 			/*
3572 			 * Else, we are seeking for a hole, check the next file
3573 			 * extent item.
3574 			 */
3575 		}
3576 
3577 		start = extent_end;
3578 		last_extent_end = extent_end;
3579 		path->slots[0]++;
3580 		if (fatal_signal_pending(current)) {
3581 			ret = -EINTR;
3582 			goto out;
3583 		}
3584 		cond_resched();
3585 	}
3586 
3587 	/* We have an implicit hole from the last extent found up to i_size. */
3588 	if (!found && start < i_size) {
3589 		found = find_desired_extent_in_hole(inode, whence,
3590 						    delalloc_cached_state, start,
3591 						    i_size - 1, &start);
3592 		if (!found)
3593 			start = i_size;
3594 	}
3595 
3596 out:
3597 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3598 	btrfs_free_path(path);
3599 
3600 	if (ret < 0)
3601 		return ret;
3602 
3603 	if (whence == SEEK_DATA && start >= i_size)
3604 		return -ENXIO;
3605 
3606 	return min_t(loff_t, start, i_size);
3607 }
3608 
3609 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3610 {
3611 	struct inode *inode = file->f_mapping->host;
3612 
3613 	switch (whence) {
3614 	default:
3615 		return generic_file_llseek(file, offset, whence);
3616 	case SEEK_DATA:
3617 	case SEEK_HOLE:
3618 		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3619 		offset = find_desired_extent(file, offset, whence);
3620 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3621 		break;
3622 	}
3623 
3624 	if (offset < 0)
3625 		return offset;
3626 
3627 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3628 }
3629 
3630 static int btrfs_file_open(struct inode *inode, struct file *filp)
3631 {
3632 	int ret;
3633 
3634 	filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
3635 
3636 	ret = fsverity_file_open(inode, filp);
3637 	if (ret)
3638 		return ret;
3639 	return generic_file_open(inode, filp);
3640 }
3641 
3642 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3643 {
3644 	ssize_t ret = 0;
3645 
3646 	if (iocb->ki_flags & IOCB_DIRECT) {
3647 		ret = btrfs_direct_read(iocb, to);
3648 		if (ret < 0 || !iov_iter_count(to) ||
3649 		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3650 			return ret;
3651 	}
3652 
3653 	return filemap_read(iocb, to, ret);
3654 }
3655 
3656 const struct file_operations btrfs_file_operations = {
3657 	.llseek		= btrfs_file_llseek,
3658 	.read_iter      = btrfs_file_read_iter,
3659 	.splice_read	= filemap_splice_read,
3660 	.write_iter	= btrfs_file_write_iter,
3661 	.splice_write	= iter_file_splice_write,
3662 	.mmap		= btrfs_file_mmap,
3663 	.open		= btrfs_file_open,
3664 	.release	= btrfs_release_file,
3665 	.get_unmapped_area = thp_get_unmapped_area,
3666 	.fsync		= btrfs_sync_file,
3667 	.fallocate	= btrfs_fallocate,
3668 	.unlocked_ioctl	= btrfs_ioctl,
3669 #ifdef CONFIG_COMPAT
3670 	.compat_ioctl	= btrfs_compat_ioctl,
3671 #endif
3672 	.remap_file_range = btrfs_remap_file_range,
3673 	.uring_cmd	= btrfs_uring_cmd,
3674 	.fop_flags	= FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
3675 };
3676 
3677 int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end)
3678 {
3679 	struct address_space *mapping = inode->vfs_inode.i_mapping;
3680 	int ret;
3681 
3682 	/*
3683 	 * So with compression we will find and lock a dirty page and clear the
3684 	 * first one as dirty, setup an async extent, and immediately return
3685 	 * with the entire range locked but with nobody actually marked with
3686 	 * writeback.  So we can't just filemap_write_and_wait_range() and
3687 	 * expect it to work since it will just kick off a thread to do the
3688 	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3689 	 * since it will wait on the page lock, which won't be unlocked until
3690 	 * after the pages have been marked as writeback and so we're good to go
3691 	 * from there.  We have to do this otherwise we'll miss the ordered
3692 	 * extents and that results in badness.  Please Josef, do not think you
3693 	 * know better and pull this out at some point in the future, it is
3694 	 * right and you are wrong.
3695 	 */
3696 	ret = filemap_fdatawrite_range(mapping, start, end);
3697 	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags))
3698 		ret = filemap_fdatawrite_range(mapping, start, end);
3699 
3700 	return ret;
3701 }
3702