xref: /linux/fs/btrfs/inode.c (revision a48395f22b8c8687ceb77ae3014a0eabcd4bf688)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <crypto/hash.h>
7 #include <linux/kernel.h>
8 #include <linux/bio.h>
9 #include <linux/blk-cgroup.h>
10 #include <linux/file.h>
11 #include <linux/fs.h>
12 #include <linux/pagemap.h>
13 #include <linux/highmem.h>
14 #include <linux/time.h>
15 #include <linux/init.h>
16 #include <linux/string.h>
17 #include <linux/backing-dev.h>
18 #include <linux/writeback.h>
19 #include <linux/compat.h>
20 #include <linux/xattr.h>
21 #include <linux/posix_acl.h>
22 #include <linux/falloc.h>
23 #include <linux/slab.h>
24 #include <linux/ratelimit.h>
25 #include <linux/btrfs.h>
26 #include <linux/blkdev.h>
27 #include <linux/posix_acl_xattr.h>
28 #include <linux/uio.h>
29 #include <linux/magic.h>
30 #include <linux/iversion.h>
31 #include <linux/swap.h>
32 #include <linux/migrate.h>
33 #include <linux/sched/mm.h>
34 #include <linux/iomap.h>
35 #include <asm/unaligned.h>
36 #include <linux/fsverity.h>
37 #include "misc.h"
38 #include "ctree.h"
39 #include "disk-io.h"
40 #include "transaction.h"
41 #include "btrfs_inode.h"
42 #include "ordered-data.h"
43 #include "xattr.h"
44 #include "tree-log.h"
45 #include "bio.h"
46 #include "compression.h"
47 #include "locking.h"
48 #include "props.h"
49 #include "qgroup.h"
50 #include "delalloc-space.h"
51 #include "block-group.h"
52 #include "space-info.h"
53 #include "zoned.h"
54 #include "subpage.h"
55 #include "inode-item.h"
56 #include "fs.h"
57 #include "accessors.h"
58 #include "extent-tree.h"
59 #include "root-tree.h"
60 #include "defrag.h"
61 #include "dir-item.h"
62 #include "file-item.h"
63 #include "uuid-tree.h"
64 #include "ioctl.h"
65 #include "file.h"
66 #include "acl.h"
67 #include "relocation.h"
68 #include "verity.h"
69 #include "super.h"
70 #include "orphan.h"
71 #include "backref.h"
72 #include "raid-stripe-tree.h"
73 #include "fiemap.h"
74 
75 struct btrfs_iget_args {
76 	u64 ino;
77 	struct btrfs_root *root;
78 };
79 
80 struct btrfs_rename_ctx {
81 	/* Output field. Stores the index number of the old directory entry. */
82 	u64 index;
83 };
84 
85 /*
86  * Used by data_reloc_print_warning_inode() to pass needed info for filename
87  * resolution and output of error message.
88  */
89 struct data_reloc_warn {
90 	struct btrfs_path path;
91 	struct btrfs_fs_info *fs_info;
92 	u64 extent_item_size;
93 	u64 logical;
94 	int mirror_num;
95 };
96 
97 /*
98  * For the file_extent_tree, we want to hold the inode lock when we lookup and
99  * update the disk_i_size, but lockdep will complain because our io_tree we hold
100  * the tree lock and get the inode lock when setting delalloc. These two things
101  * are unrelated, so make a class for the file_extent_tree so we don't get the
102  * two locking patterns mixed up.
103  */
104 static struct lock_class_key file_extent_tree_class;
105 
106 static const struct inode_operations btrfs_dir_inode_operations;
107 static const struct inode_operations btrfs_symlink_inode_operations;
108 static const struct inode_operations btrfs_special_inode_operations;
109 static const struct inode_operations btrfs_file_inode_operations;
110 static const struct address_space_operations btrfs_aops;
111 static const struct file_operations btrfs_dir_file_operations;
112 
113 static struct kmem_cache *btrfs_inode_cachep;
114 
115 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
116 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
117 
118 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
119 				     struct page *locked_page, u64 start,
120 				     u64 end, struct writeback_control *wbc,
121 				     bool pages_dirty);
122 
123 static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
124 					  u64 root, void *warn_ctx)
125 {
126 	struct data_reloc_warn *warn = warn_ctx;
127 	struct btrfs_fs_info *fs_info = warn->fs_info;
128 	struct extent_buffer *eb;
129 	struct btrfs_inode_item *inode_item;
130 	struct inode_fs_paths *ipath = NULL;
131 	struct btrfs_root *local_root;
132 	struct btrfs_key key;
133 	unsigned int nofs_flag;
134 	u32 nlink;
135 	int ret;
136 
137 	local_root = btrfs_get_fs_root(fs_info, root, true);
138 	if (IS_ERR(local_root)) {
139 		ret = PTR_ERR(local_root);
140 		goto err;
141 	}
142 
143 	/* This makes the path point to (inum INODE_ITEM ioff). */
144 	key.objectid = inum;
145 	key.type = BTRFS_INODE_ITEM_KEY;
146 	key.offset = 0;
147 
148 	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
149 	if (ret) {
150 		btrfs_put_root(local_root);
151 		btrfs_release_path(&warn->path);
152 		goto err;
153 	}
154 
155 	eb = warn->path.nodes[0];
156 	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
157 	nlink = btrfs_inode_nlink(eb, inode_item);
158 	btrfs_release_path(&warn->path);
159 
160 	nofs_flag = memalloc_nofs_save();
161 	ipath = init_ipath(4096, local_root, &warn->path);
162 	memalloc_nofs_restore(nofs_flag);
163 	if (IS_ERR(ipath)) {
164 		btrfs_put_root(local_root);
165 		ret = PTR_ERR(ipath);
166 		ipath = NULL;
167 		/*
168 		 * -ENOMEM, not a critical error, just output an generic error
169 		 * without filename.
170 		 */
171 		btrfs_warn(fs_info,
172 "checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
173 			   warn->logical, warn->mirror_num, root, inum, offset);
174 		return ret;
175 	}
176 	ret = paths_from_inode(inum, ipath);
177 	if (ret < 0)
178 		goto err;
179 
180 	/*
181 	 * We deliberately ignore the bit ipath might have been too small to
182 	 * hold all of the paths here
183 	 */
184 	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
185 		btrfs_warn(fs_info,
186 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
187 			   warn->logical, warn->mirror_num, root, inum, offset,
188 			   fs_info->sectorsize, nlink,
189 			   (char *)(unsigned long)ipath->fspath->val[i]);
190 	}
191 
192 	btrfs_put_root(local_root);
193 	free_ipath(ipath);
194 	return 0;
195 
196 err:
197 	btrfs_warn(fs_info,
198 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
199 		   warn->logical, warn->mirror_num, root, inum, offset, ret);
200 
201 	free_ipath(ipath);
202 	return ret;
203 }
204 
205 /*
206  * Do extra user-friendly error output (e.g. lookup all the affected files).
207  *
208  * Return true if we succeeded doing the backref lookup.
209  * Return false if such lookup failed, and has to fallback to the old error message.
210  */
211 static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
212 				   const u8 *csum, const u8 *csum_expected,
213 				   int mirror_num)
214 {
215 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
216 	struct btrfs_path path = { 0 };
217 	struct btrfs_key found_key = { 0 };
218 	struct extent_buffer *eb;
219 	struct btrfs_extent_item *ei;
220 	const u32 csum_size = fs_info->csum_size;
221 	u64 logical;
222 	u64 flags;
223 	u32 item_size;
224 	int ret;
225 
226 	mutex_lock(&fs_info->reloc_mutex);
227 	logical = btrfs_get_reloc_bg_bytenr(fs_info);
228 	mutex_unlock(&fs_info->reloc_mutex);
229 
230 	if (logical == U64_MAX) {
231 		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
232 		btrfs_warn_rl(fs_info,
233 "csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
234 			btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
235 			CSUM_FMT_VALUE(csum_size, csum),
236 			CSUM_FMT_VALUE(csum_size, csum_expected),
237 			mirror_num);
238 		return;
239 	}
240 
241 	logical += file_off;
242 	btrfs_warn_rl(fs_info,
243 "csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
244 			btrfs_root_id(inode->root),
245 			btrfs_ino(inode), file_off, logical,
246 			CSUM_FMT_VALUE(csum_size, csum),
247 			CSUM_FMT_VALUE(csum_size, csum_expected),
248 			mirror_num);
249 
250 	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
251 	if (ret < 0) {
252 		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
253 			     logical, ret);
254 		return;
255 	}
256 	eb = path.nodes[0];
257 	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
258 	item_size = btrfs_item_size(eb, path.slots[0]);
259 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
260 		unsigned long ptr = 0;
261 		u64 ref_root;
262 		u8 ref_level;
263 
264 		while (true) {
265 			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
266 						      item_size, &ref_root,
267 						      &ref_level);
268 			if (ret < 0) {
269 				btrfs_warn_rl(fs_info,
270 				"failed to resolve tree backref for logical %llu: %d",
271 					      logical, ret);
272 				break;
273 			}
274 			if (ret > 0)
275 				break;
276 
277 			btrfs_warn_rl(fs_info,
278 "csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
279 				logical, mirror_num,
280 				(ref_level ? "node" : "leaf"),
281 				ref_level, ref_root);
282 		}
283 		btrfs_release_path(&path);
284 	} else {
285 		struct btrfs_backref_walk_ctx ctx = { 0 };
286 		struct data_reloc_warn reloc_warn = { 0 };
287 
288 		btrfs_release_path(&path);
289 
290 		ctx.bytenr = found_key.objectid;
291 		ctx.extent_item_pos = logical - found_key.objectid;
292 		ctx.fs_info = fs_info;
293 
294 		reloc_warn.logical = logical;
295 		reloc_warn.extent_item_size = found_key.offset;
296 		reloc_warn.mirror_num = mirror_num;
297 		reloc_warn.fs_info = fs_info;
298 
299 		iterate_extent_inodes(&ctx, true,
300 				      data_reloc_print_warning_inode, &reloc_warn);
301 	}
302 }
303 
304 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
305 		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
306 {
307 	struct btrfs_root *root = inode->root;
308 	const u32 csum_size = root->fs_info->csum_size;
309 
310 	/* For data reloc tree, it's better to do a backref lookup instead. */
311 	if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID)
312 		return print_data_reloc_error(inode, logical_start, csum,
313 					      csum_expected, mirror_num);
314 
315 	/* Output without objectid, which is more meaningful */
316 	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
317 		btrfs_warn_rl(root->fs_info,
318 "csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
319 			btrfs_root_id(root), btrfs_ino(inode),
320 			logical_start,
321 			CSUM_FMT_VALUE(csum_size, csum),
322 			CSUM_FMT_VALUE(csum_size, csum_expected),
323 			mirror_num);
324 	} else {
325 		btrfs_warn_rl(root->fs_info,
326 "csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
327 			btrfs_root_id(root), btrfs_ino(inode),
328 			logical_start,
329 			CSUM_FMT_VALUE(csum_size, csum),
330 			CSUM_FMT_VALUE(csum_size, csum_expected),
331 			mirror_num);
332 	}
333 }
334 
335 /*
336  * Lock inode i_rwsem based on arguments passed.
337  *
338  * ilock_flags can have the following bit set:
339  *
340  * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
341  * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
342  *		     return -EAGAIN
343  * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
344  */
345 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
346 {
347 	if (ilock_flags & BTRFS_ILOCK_SHARED) {
348 		if (ilock_flags & BTRFS_ILOCK_TRY) {
349 			if (!inode_trylock_shared(&inode->vfs_inode))
350 				return -EAGAIN;
351 			else
352 				return 0;
353 		}
354 		inode_lock_shared(&inode->vfs_inode);
355 	} else {
356 		if (ilock_flags & BTRFS_ILOCK_TRY) {
357 			if (!inode_trylock(&inode->vfs_inode))
358 				return -EAGAIN;
359 			else
360 				return 0;
361 		}
362 		inode_lock(&inode->vfs_inode);
363 	}
364 	if (ilock_flags & BTRFS_ILOCK_MMAP)
365 		down_write(&inode->i_mmap_lock);
366 	return 0;
367 }
368 
369 /*
370  * Unock inode i_rwsem.
371  *
372  * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
373  * to decide whether the lock acquired is shared or exclusive.
374  */
375 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
376 {
377 	if (ilock_flags & BTRFS_ILOCK_MMAP)
378 		up_write(&inode->i_mmap_lock);
379 	if (ilock_flags & BTRFS_ILOCK_SHARED)
380 		inode_unlock_shared(&inode->vfs_inode);
381 	else
382 		inode_unlock(&inode->vfs_inode);
383 }
384 
385 /*
386  * Cleanup all submitted ordered extents in specified range to handle errors
387  * from the btrfs_run_delalloc_range() callback.
388  *
389  * NOTE: caller must ensure that when an error happens, it can not call
390  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
391  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
392  * to be released, which we want to happen only when finishing the ordered
393  * extent (btrfs_finish_ordered_io()).
394  */
395 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
396 						 struct page *locked_page,
397 						 u64 offset, u64 bytes)
398 {
399 	unsigned long index = offset >> PAGE_SHIFT;
400 	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
401 	u64 page_start = 0, page_end = 0;
402 	struct page *page;
403 
404 	if (locked_page) {
405 		page_start = page_offset(locked_page);
406 		page_end = page_start + PAGE_SIZE - 1;
407 	}
408 
409 	while (index <= end_index) {
410 		/*
411 		 * For locked page, we will call btrfs_mark_ordered_io_finished
412 		 * through btrfs_mark_ordered_io_finished() on it
413 		 * in run_delalloc_range() for the error handling, which will
414 		 * clear page Ordered and run the ordered extent accounting.
415 		 *
416 		 * Here we can't just clear the Ordered bit, or
417 		 * btrfs_mark_ordered_io_finished() would skip the accounting
418 		 * for the page range, and the ordered extent will never finish.
419 		 */
420 		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
421 			index++;
422 			continue;
423 		}
424 		page = find_get_page(inode->vfs_inode.i_mapping, index);
425 		index++;
426 		if (!page)
427 			continue;
428 
429 		/*
430 		 * Here we just clear all Ordered bits for every page in the
431 		 * range, then btrfs_mark_ordered_io_finished() will handle
432 		 * the ordered extent accounting for the range.
433 		 */
434 		btrfs_folio_clamp_clear_ordered(inode->root->fs_info,
435 						page_folio(page), offset, bytes);
436 		put_page(page);
437 	}
438 
439 	if (locked_page) {
440 		/* The locked page covers the full range, nothing needs to be done */
441 		if (bytes + offset <= page_start + PAGE_SIZE)
442 			return;
443 		/*
444 		 * In case this page belongs to the delalloc range being
445 		 * instantiated then skip it, since the first page of a range is
446 		 * going to be properly cleaned up by the caller of
447 		 * run_delalloc_range
448 		 */
449 		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
450 			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
451 			offset = page_offset(locked_page) + PAGE_SIZE;
452 		}
453 	}
454 
455 	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
456 }
457 
458 static int btrfs_dirty_inode(struct btrfs_inode *inode);
459 
460 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
461 				     struct btrfs_new_inode_args *args)
462 {
463 	int err;
464 
465 	if (args->default_acl) {
466 		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
467 				      ACL_TYPE_DEFAULT);
468 		if (err)
469 			return err;
470 	}
471 	if (args->acl) {
472 		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
473 		if (err)
474 			return err;
475 	}
476 	if (!args->default_acl && !args->acl)
477 		cache_no_acl(args->inode);
478 	return btrfs_xattr_security_init(trans, args->inode, args->dir,
479 					 &args->dentry->d_name);
480 }
481 
482 /*
483  * this does all the hard work for inserting an inline extent into
484  * the btree.  The caller should have done a btrfs_drop_extents so that
485  * no overlapping inline items exist in the btree
486  */
487 static int insert_inline_extent(struct btrfs_trans_handle *trans,
488 				struct btrfs_path *path,
489 				struct btrfs_inode *inode, bool extent_inserted,
490 				size_t size, size_t compressed_size,
491 				int compress_type,
492 				struct folio *compressed_folio,
493 				bool update_i_size)
494 {
495 	struct btrfs_root *root = inode->root;
496 	struct extent_buffer *leaf;
497 	struct page *page = NULL;
498 	const u32 sectorsize = trans->fs_info->sectorsize;
499 	char *kaddr;
500 	unsigned long ptr;
501 	struct btrfs_file_extent_item *ei;
502 	int ret;
503 	size_t cur_size = size;
504 	u64 i_size;
505 
506 	/*
507 	 * The decompressed size must still be no larger than a sector.  Under
508 	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
509 	 * big deal and we can continue the insertion.
510 	 */
511 	ASSERT(size <= sectorsize);
512 
513 	/*
514 	 * The compressed size also needs to be no larger than a sector.
515 	 * That's also why we only need one page as the parameter.
516 	 */
517 	if (compressed_folio)
518 		ASSERT(compressed_size <= sectorsize);
519 	else
520 		ASSERT(compressed_size == 0);
521 
522 	if (compressed_size && compressed_folio)
523 		cur_size = compressed_size;
524 
525 	if (!extent_inserted) {
526 		struct btrfs_key key;
527 		size_t datasize;
528 
529 		key.objectid = btrfs_ino(inode);
530 		key.offset = 0;
531 		key.type = BTRFS_EXTENT_DATA_KEY;
532 
533 		datasize = btrfs_file_extent_calc_inline_size(cur_size);
534 		ret = btrfs_insert_empty_item(trans, root, path, &key,
535 					      datasize);
536 		if (ret)
537 			goto fail;
538 	}
539 	leaf = path->nodes[0];
540 	ei = btrfs_item_ptr(leaf, path->slots[0],
541 			    struct btrfs_file_extent_item);
542 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
543 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
544 	btrfs_set_file_extent_encryption(leaf, ei, 0);
545 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
546 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
547 	ptr = btrfs_file_extent_inline_start(ei);
548 
549 	if (compress_type != BTRFS_COMPRESS_NONE) {
550 		kaddr = kmap_local_folio(compressed_folio, 0);
551 		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
552 		kunmap_local(kaddr);
553 
554 		btrfs_set_file_extent_compression(leaf, ei,
555 						  compress_type);
556 	} else {
557 		page = find_get_page(inode->vfs_inode.i_mapping, 0);
558 		btrfs_set_file_extent_compression(leaf, ei, 0);
559 		kaddr = kmap_local_page(page);
560 		write_extent_buffer(leaf, kaddr, ptr, size);
561 		kunmap_local(kaddr);
562 		put_page(page);
563 	}
564 	btrfs_mark_buffer_dirty(trans, leaf);
565 	btrfs_release_path(path);
566 
567 	/*
568 	 * We align size to sectorsize for inline extents just for simplicity
569 	 * sake.
570 	 */
571 	ret = btrfs_inode_set_file_extent_range(inode, 0,
572 					ALIGN(size, root->fs_info->sectorsize));
573 	if (ret)
574 		goto fail;
575 
576 	/*
577 	 * We're an inline extent, so nobody can extend the file past i_size
578 	 * without locking a page we already have locked.
579 	 *
580 	 * We must do any i_size and inode updates before we unlock the pages.
581 	 * Otherwise we could end up racing with unlink.
582 	 */
583 	i_size = i_size_read(&inode->vfs_inode);
584 	if (update_i_size && size > i_size) {
585 		i_size_write(&inode->vfs_inode, size);
586 		i_size = size;
587 	}
588 	inode->disk_i_size = i_size;
589 
590 fail:
591 	return ret;
592 }
593 
594 static bool can_cow_file_range_inline(struct btrfs_inode *inode,
595 				      u64 offset, u64 size,
596 				      size_t compressed_size)
597 {
598 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
599 	u64 data_len = (compressed_size ?: size);
600 
601 	/* Inline extents must start at offset 0. */
602 	if (offset != 0)
603 		return false;
604 
605 	/*
606 	 * Due to the page size limit, for subpage we can only trigger the
607 	 * writeback for the dirty sectors of page, that means data writeback
608 	 * is doing more writeback than what we want.
609 	 *
610 	 * This is especially unexpected for some call sites like fallocate,
611 	 * where we only increase i_size after everything is done.
612 	 * This means we can trigger inline extent even if we didn't want to.
613 	 * So here we skip inline extent creation completely.
614 	 */
615 	if (fs_info->sectorsize != PAGE_SIZE)
616 		return false;
617 
618 	/* Inline extents are limited to sectorsize. */
619 	if (size > fs_info->sectorsize)
620 		return false;
621 
622 	/* We cannot exceed the maximum inline data size. */
623 	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
624 		return false;
625 
626 	/* We cannot exceed the user specified max_inline size. */
627 	if (data_len > fs_info->max_inline)
628 		return false;
629 
630 	/* Inline extents must be the entirety of the file. */
631 	if (size < i_size_read(&inode->vfs_inode))
632 		return false;
633 
634 	return true;
635 }
636 
637 /*
638  * conditionally insert an inline extent into the file.  This
639  * does the checks required to make sure the data is small enough
640  * to fit as an inline extent.
641  *
642  * If being used directly, you must have already checked we're allowed to cow
643  * the range by getting true from can_cow_file_range_inline().
644  */
645 static noinline int __cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
646 					    u64 size, size_t compressed_size,
647 					    int compress_type,
648 					    struct folio *compressed_folio,
649 					    bool update_i_size)
650 {
651 	struct btrfs_drop_extents_args drop_args = { 0 };
652 	struct btrfs_root *root = inode->root;
653 	struct btrfs_fs_info *fs_info = root->fs_info;
654 	struct btrfs_trans_handle *trans;
655 	u64 data_len = (compressed_size ?: size);
656 	int ret;
657 	struct btrfs_path *path;
658 
659 	path = btrfs_alloc_path();
660 	if (!path)
661 		return -ENOMEM;
662 
663 	trans = btrfs_join_transaction(root);
664 	if (IS_ERR(trans)) {
665 		btrfs_free_path(path);
666 		return PTR_ERR(trans);
667 	}
668 	trans->block_rsv = &inode->block_rsv;
669 
670 	drop_args.path = path;
671 	drop_args.start = 0;
672 	drop_args.end = fs_info->sectorsize;
673 	drop_args.drop_cache = true;
674 	drop_args.replace_extent = true;
675 	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
676 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
677 	if (ret) {
678 		btrfs_abort_transaction(trans, ret);
679 		goto out;
680 	}
681 
682 	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
683 				   size, compressed_size, compress_type,
684 				   compressed_folio, update_i_size);
685 	if (ret && ret != -ENOSPC) {
686 		btrfs_abort_transaction(trans, ret);
687 		goto out;
688 	} else if (ret == -ENOSPC) {
689 		ret = 1;
690 		goto out;
691 	}
692 
693 	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
694 	ret = btrfs_update_inode(trans, inode);
695 	if (ret && ret != -ENOSPC) {
696 		btrfs_abort_transaction(trans, ret);
697 		goto out;
698 	} else if (ret == -ENOSPC) {
699 		ret = 1;
700 		goto out;
701 	}
702 
703 	btrfs_set_inode_full_sync(inode);
704 out:
705 	/*
706 	 * Don't forget to free the reserved space, as for inlined extent
707 	 * it won't count as data extent, free them directly here.
708 	 * And at reserve time, it's always aligned to page size, so
709 	 * just free one page here.
710 	 */
711 	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
712 	btrfs_free_path(path);
713 	btrfs_end_transaction(trans);
714 	return ret;
715 }
716 
717 static noinline int cow_file_range_inline(struct btrfs_inode *inode,
718 					  struct page *locked_page,
719 					  u64 offset, u64 end,
720 					  size_t compressed_size,
721 					  int compress_type,
722 					  struct folio *compressed_folio,
723 					  bool update_i_size)
724 {
725 	struct extent_state *cached = NULL;
726 	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
727 		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
728 	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
729 	int ret;
730 
731 	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
732 		return 1;
733 
734 	lock_extent(&inode->io_tree, offset, end, &cached);
735 	ret = __cow_file_range_inline(inode, offset, size, compressed_size,
736 				      compress_type, compressed_folio,
737 				      update_i_size);
738 	if (ret > 0) {
739 		unlock_extent(&inode->io_tree, offset, end, &cached);
740 		return ret;
741 	}
742 
743 	if (ret == 0)
744 		locked_page = NULL;
745 
746 	extent_clear_unlock_delalloc(inode, offset, end, locked_page, &cached,
747 				     clear_flags,
748 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
749 				     PAGE_END_WRITEBACK);
750 	return ret;
751 }
752 
753 struct async_extent {
754 	u64 start;
755 	u64 ram_size;
756 	u64 compressed_size;
757 	struct folio **folios;
758 	unsigned long nr_folios;
759 	int compress_type;
760 	struct list_head list;
761 };
762 
763 struct async_chunk {
764 	struct btrfs_inode *inode;
765 	struct page *locked_page;
766 	u64 start;
767 	u64 end;
768 	blk_opf_t write_flags;
769 	struct list_head extents;
770 	struct cgroup_subsys_state *blkcg_css;
771 	struct btrfs_work work;
772 	struct async_cow *async_cow;
773 };
774 
775 struct async_cow {
776 	atomic_t num_chunks;
777 	struct async_chunk chunks[];
778 };
779 
780 static noinline int add_async_extent(struct async_chunk *cow,
781 				     u64 start, u64 ram_size,
782 				     u64 compressed_size,
783 				     struct folio **folios,
784 				     unsigned long nr_folios,
785 				     int compress_type)
786 {
787 	struct async_extent *async_extent;
788 
789 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
790 	if (!async_extent)
791 		return -ENOMEM;
792 	async_extent->start = start;
793 	async_extent->ram_size = ram_size;
794 	async_extent->compressed_size = compressed_size;
795 	async_extent->folios = folios;
796 	async_extent->nr_folios = nr_folios;
797 	async_extent->compress_type = compress_type;
798 	list_add_tail(&async_extent->list, &cow->extents);
799 	return 0;
800 }
801 
802 /*
803  * Check if the inode needs to be submitted to compression, based on mount
804  * options, defragmentation, properties or heuristics.
805  */
806 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
807 				      u64 end)
808 {
809 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
810 
811 	if (!btrfs_inode_can_compress(inode)) {
812 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
813 			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
814 			btrfs_ino(inode));
815 		return 0;
816 	}
817 	/*
818 	 * Special check for subpage.
819 	 *
820 	 * We lock the full page then run each delalloc range in the page, thus
821 	 * for the following case, we will hit some subpage specific corner case:
822 	 *
823 	 * 0		32K		64K
824 	 * |	|///////|	|///////|
825 	 *		\- A		\- B
826 	 *
827 	 * In above case, both range A and range B will try to unlock the full
828 	 * page [0, 64K), causing the one finished later will have page
829 	 * unlocked already, triggering various page lock requirement BUG_ON()s.
830 	 *
831 	 * So here we add an artificial limit that subpage compression can only
832 	 * if the range is fully page aligned.
833 	 *
834 	 * In theory we only need to ensure the first page is fully covered, but
835 	 * the tailing partial page will be locked until the full compression
836 	 * finishes, delaying the write of other range.
837 	 *
838 	 * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range
839 	 * first to prevent any submitted async extent to unlock the full page.
840 	 * By this, we can ensure for subpage case that only the last async_cow
841 	 * will unlock the full page.
842 	 */
843 	if (fs_info->sectorsize < PAGE_SIZE) {
844 		if (!PAGE_ALIGNED(start) ||
845 		    !PAGE_ALIGNED(end + 1))
846 			return 0;
847 	}
848 
849 	/* force compress */
850 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
851 		return 1;
852 	/* defrag ioctl */
853 	if (inode->defrag_compress)
854 		return 1;
855 	/* bad compression ratios */
856 	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
857 		return 0;
858 	if (btrfs_test_opt(fs_info, COMPRESS) ||
859 	    inode->flags & BTRFS_INODE_COMPRESS ||
860 	    inode->prop_compress)
861 		return btrfs_compress_heuristic(inode, start, end);
862 	return 0;
863 }
864 
865 static inline void inode_should_defrag(struct btrfs_inode *inode,
866 		u64 start, u64 end, u64 num_bytes, u32 small_write)
867 {
868 	/* If this is a small write inside eof, kick off a defrag */
869 	if (num_bytes < small_write &&
870 	    (start > 0 || end + 1 < inode->disk_i_size))
871 		btrfs_add_inode_defrag(NULL, inode, small_write);
872 }
873 
874 static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
875 {
876 	unsigned long end_index = end >> PAGE_SHIFT;
877 	struct page *page;
878 	int ret = 0;
879 
880 	for (unsigned long index = start >> PAGE_SHIFT;
881 	     index <= end_index; index++) {
882 		page = find_get_page(inode->i_mapping, index);
883 		if (unlikely(!page)) {
884 			if (!ret)
885 				ret = -ENOENT;
886 			continue;
887 		}
888 		clear_page_dirty_for_io(page);
889 		put_page(page);
890 	}
891 	return ret;
892 }
893 
894 /*
895  * Work queue call back to started compression on a file and pages.
896  *
897  * This is done inside an ordered work queue, and the compression is spread
898  * across many cpus.  The actual IO submission is step two, and the ordered work
899  * queue takes care of making sure that happens in the same order things were
900  * put onto the queue by writepages and friends.
901  *
902  * If this code finds it can't get good compression, it puts an entry onto the
903  * work queue to write the uncompressed bytes.  This makes sure that both
904  * compressed inodes and uncompressed inodes are written in the same order that
905  * the flusher thread sent them down.
906  */
907 static void compress_file_range(struct btrfs_work *work)
908 {
909 	struct async_chunk *async_chunk =
910 		container_of(work, struct async_chunk, work);
911 	struct btrfs_inode *inode = async_chunk->inode;
912 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
913 	struct address_space *mapping = inode->vfs_inode.i_mapping;
914 	u64 blocksize = fs_info->sectorsize;
915 	u64 start = async_chunk->start;
916 	u64 end = async_chunk->end;
917 	u64 actual_end;
918 	u64 i_size;
919 	int ret = 0;
920 	struct folio **folios;
921 	unsigned long nr_folios;
922 	unsigned long total_compressed = 0;
923 	unsigned long total_in = 0;
924 	unsigned int poff;
925 	int i;
926 	int compress_type = fs_info->compress_type;
927 
928 	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
929 
930 	/*
931 	 * We need to call clear_page_dirty_for_io on each page in the range.
932 	 * Otherwise applications with the file mmap'd can wander in and change
933 	 * the page contents while we are compressing them.
934 	 */
935 	ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
936 
937 	/*
938 	 * All the folios should have been locked thus no failure.
939 	 *
940 	 * And even if some folios are missing, btrfs_compress_folios()
941 	 * would handle them correctly, so here just do an ASSERT() check for
942 	 * early logic errors.
943 	 */
944 	ASSERT(ret == 0);
945 
946 	/*
947 	 * We need to save i_size before now because it could change in between
948 	 * us evaluating the size and assigning it.  This is because we lock and
949 	 * unlock the page in truncate and fallocate, and then modify the i_size
950 	 * later on.
951 	 *
952 	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
953 	 * does that for us.
954 	 */
955 	barrier();
956 	i_size = i_size_read(&inode->vfs_inode);
957 	barrier();
958 	actual_end = min_t(u64, i_size, end + 1);
959 again:
960 	folios = NULL;
961 	nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
962 	nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES);
963 
964 	/*
965 	 * we don't want to send crud past the end of i_size through
966 	 * compression, that's just a waste of CPU time.  So, if the
967 	 * end of the file is before the start of our current
968 	 * requested range of bytes, we bail out to the uncompressed
969 	 * cleanup code that can deal with all of this.
970 	 *
971 	 * It isn't really the fastest way to fix things, but this is a
972 	 * very uncommon corner.
973 	 */
974 	if (actual_end <= start)
975 		goto cleanup_and_bail_uncompressed;
976 
977 	total_compressed = actual_end - start;
978 
979 	/*
980 	 * Skip compression for a small file range(<=blocksize) that
981 	 * isn't an inline extent, since it doesn't save disk space at all.
982 	 */
983 	if (total_compressed <= blocksize &&
984 	   (start > 0 || end + 1 < inode->disk_i_size))
985 		goto cleanup_and_bail_uncompressed;
986 
987 	/*
988 	 * For subpage case, we require full page alignment for the sector
989 	 * aligned range.
990 	 * Thus we must also check against @actual_end, not just @end.
991 	 */
992 	if (blocksize < PAGE_SIZE) {
993 		if (!PAGE_ALIGNED(start) ||
994 		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
995 			goto cleanup_and_bail_uncompressed;
996 	}
997 
998 	total_compressed = min_t(unsigned long, total_compressed,
999 			BTRFS_MAX_UNCOMPRESSED);
1000 	total_in = 0;
1001 	ret = 0;
1002 
1003 	/*
1004 	 * We do compression for mount -o compress and when the inode has not
1005 	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
1006 	 * discover bad compression ratios.
1007 	 */
1008 	if (!inode_need_compress(inode, start, end))
1009 		goto cleanup_and_bail_uncompressed;
1010 
1011 	folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
1012 	if (!folios) {
1013 		/*
1014 		 * Memory allocation failure is not a fatal error, we can fall
1015 		 * back to uncompressed code.
1016 		 */
1017 		goto cleanup_and_bail_uncompressed;
1018 	}
1019 
1020 	if (inode->defrag_compress)
1021 		compress_type = inode->defrag_compress;
1022 	else if (inode->prop_compress)
1023 		compress_type = inode->prop_compress;
1024 
1025 	/* Compression level is applied here. */
1026 	ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4),
1027 				    mapping, start, folios, &nr_folios, &total_in,
1028 				    &total_compressed);
1029 	if (ret)
1030 		goto mark_incompressible;
1031 
1032 	/*
1033 	 * Zero the tail end of the last page, as we might be sending it down
1034 	 * to disk.
1035 	 */
1036 	poff = offset_in_page(total_compressed);
1037 	if (poff)
1038 		folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff);
1039 
1040 	/*
1041 	 * Try to create an inline extent.
1042 	 *
1043 	 * If we didn't compress the entire range, try to create an uncompressed
1044 	 * inline extent, else a compressed one.
1045 	 *
1046 	 * Check cow_file_range() for why we don't even try to create inline
1047 	 * extent for the subpage case.
1048 	 */
1049 	if (total_in < actual_end)
1050 		ret = cow_file_range_inline(inode, NULL, start, end, 0,
1051 					    BTRFS_COMPRESS_NONE, NULL, false);
1052 	else
1053 		ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
1054 					    compress_type, folios[0], false);
1055 	if (ret <= 0) {
1056 		if (ret < 0)
1057 			mapping_set_error(mapping, -EIO);
1058 		goto free_pages;
1059 	}
1060 
1061 	/*
1062 	 * We aren't doing an inline extent. Round the compressed size up to a
1063 	 * block size boundary so the allocator does sane things.
1064 	 */
1065 	total_compressed = ALIGN(total_compressed, blocksize);
1066 
1067 	/*
1068 	 * One last check to make sure the compression is really a win, compare
1069 	 * the page count read with the blocks on disk, compression must free at
1070 	 * least one sector.
1071 	 */
1072 	total_in = round_up(total_in, fs_info->sectorsize);
1073 	if (total_compressed + blocksize > total_in)
1074 		goto mark_incompressible;
1075 
1076 	/*
1077 	 * The async work queues will take care of doing actual allocation on
1078 	 * disk for these compressed pages, and will submit the bios.
1079 	 */
1080 	ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
1081 			       nr_folios, compress_type);
1082 	BUG_ON(ret);
1083 	if (start + total_in < end) {
1084 		start += total_in;
1085 		cond_resched();
1086 		goto again;
1087 	}
1088 	return;
1089 
1090 mark_incompressible:
1091 	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
1092 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
1093 cleanup_and_bail_uncompressed:
1094 	ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
1095 			       BTRFS_COMPRESS_NONE);
1096 	BUG_ON(ret);
1097 free_pages:
1098 	if (folios) {
1099 		for (i = 0; i < nr_folios; i++) {
1100 			WARN_ON(folios[i]->mapping);
1101 			btrfs_free_compr_folio(folios[i]);
1102 		}
1103 		kfree(folios);
1104 	}
1105 }
1106 
1107 static void free_async_extent_pages(struct async_extent *async_extent)
1108 {
1109 	int i;
1110 
1111 	if (!async_extent->folios)
1112 		return;
1113 
1114 	for (i = 0; i < async_extent->nr_folios; i++) {
1115 		WARN_ON(async_extent->folios[i]->mapping);
1116 		btrfs_free_compr_folio(async_extent->folios[i]);
1117 	}
1118 	kfree(async_extent->folios);
1119 	async_extent->nr_folios = 0;
1120 	async_extent->folios = NULL;
1121 }
1122 
1123 static void submit_uncompressed_range(struct btrfs_inode *inode,
1124 				      struct async_extent *async_extent,
1125 				      struct page *locked_page)
1126 {
1127 	u64 start = async_extent->start;
1128 	u64 end = async_extent->start + async_extent->ram_size - 1;
1129 	int ret;
1130 	struct writeback_control wbc = {
1131 		.sync_mode		= WB_SYNC_ALL,
1132 		.range_start		= start,
1133 		.range_end		= end,
1134 		.no_cgroup_owner	= 1,
1135 	};
1136 
1137 	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
1138 	ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
1139 	wbc_detach_inode(&wbc);
1140 	if (ret < 0) {
1141 		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
1142 		if (locked_page) {
1143 			const u64 page_start = page_offset(locked_page);
1144 
1145 			set_page_writeback(locked_page);
1146 			end_page_writeback(locked_page);
1147 			btrfs_mark_ordered_io_finished(inode, locked_page,
1148 						       page_start, PAGE_SIZE,
1149 						       !ret);
1150 			mapping_set_error(locked_page->mapping, ret);
1151 			unlock_page(locked_page);
1152 		}
1153 	}
1154 }
1155 
1156 static void submit_one_async_extent(struct async_chunk *async_chunk,
1157 				    struct async_extent *async_extent,
1158 				    u64 *alloc_hint)
1159 {
1160 	struct btrfs_inode *inode = async_chunk->inode;
1161 	struct extent_io_tree *io_tree = &inode->io_tree;
1162 	struct btrfs_root *root = inode->root;
1163 	struct btrfs_fs_info *fs_info = root->fs_info;
1164 	struct btrfs_ordered_extent *ordered;
1165 	struct btrfs_file_extent file_extent;
1166 	struct btrfs_key ins;
1167 	struct page *locked_page = NULL;
1168 	struct extent_state *cached = NULL;
1169 	struct extent_map *em;
1170 	int ret = 0;
1171 	u64 start = async_extent->start;
1172 	u64 end = async_extent->start + async_extent->ram_size - 1;
1173 
1174 	if (async_chunk->blkcg_css)
1175 		kthread_associate_blkcg(async_chunk->blkcg_css);
1176 
1177 	/*
1178 	 * If async_chunk->locked_page is in the async_extent range, we need to
1179 	 * handle it.
1180 	 */
1181 	if (async_chunk->locked_page) {
1182 		u64 locked_page_start = page_offset(async_chunk->locked_page);
1183 		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;
1184 
1185 		if (!(start >= locked_page_end || end <= locked_page_start))
1186 			locked_page = async_chunk->locked_page;
1187 	}
1188 
1189 	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
1190 		submit_uncompressed_range(inode, async_extent, locked_page);
1191 		goto done;
1192 	}
1193 
1194 	ret = btrfs_reserve_extent(root, async_extent->ram_size,
1195 				   async_extent->compressed_size,
1196 				   async_extent->compressed_size,
1197 				   0, *alloc_hint, &ins, 1, 1);
1198 	if (ret) {
1199 		/*
1200 		 * We can't reserve contiguous space for the compressed size.
1201 		 * Unlikely, but it's possible that we could have enough
1202 		 * non-contiguous space for the uncompressed size instead.  So
1203 		 * fall back to uncompressed.
1204 		 */
1205 		submit_uncompressed_range(inode, async_extent, locked_page);
1206 		goto done;
1207 	}
1208 
1209 	lock_extent(io_tree, start, end, &cached);
1210 
1211 	/* Here we're doing allocation and writeback of the compressed pages */
1212 	file_extent.disk_bytenr = ins.objectid;
1213 	file_extent.disk_num_bytes = ins.offset;
1214 	file_extent.ram_bytes = async_extent->ram_size;
1215 	file_extent.num_bytes = async_extent->ram_size;
1216 	file_extent.offset = 0;
1217 	file_extent.compression = async_extent->compress_type;
1218 
1219 	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
1220 	if (IS_ERR(em)) {
1221 		ret = PTR_ERR(em);
1222 		goto out_free_reserve;
1223 	}
1224 	free_extent_map(em);
1225 
1226 	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
1227 					     1 << BTRFS_ORDERED_COMPRESSED);
1228 	if (IS_ERR(ordered)) {
1229 		btrfs_drop_extent_map_range(inode, start, end, false);
1230 		ret = PTR_ERR(ordered);
1231 		goto out_free_reserve;
1232 	}
1233 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1234 
1235 	/* Clear dirty, set writeback and unlock the pages. */
1236 	extent_clear_unlock_delalloc(inode, start, end,
1237 			NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
1238 			PAGE_UNLOCK | PAGE_START_WRITEBACK);
1239 	btrfs_submit_compressed_write(ordered,
1240 			    async_extent->folios,	/* compressed_folios */
1241 			    async_extent->nr_folios,
1242 			    async_chunk->write_flags, true);
1243 	*alloc_hint = ins.objectid + ins.offset;
1244 done:
1245 	if (async_chunk->blkcg_css)
1246 		kthread_associate_blkcg(NULL);
1247 	kfree(async_extent);
1248 	return;
1249 
1250 out_free_reserve:
1251 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1252 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1253 	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1254 	extent_clear_unlock_delalloc(inode, start, end,
1255 				     NULL, &cached,
1256 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1257 				     EXTENT_DELALLOC_NEW |
1258 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
1259 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1260 				     PAGE_END_WRITEBACK);
1261 	free_async_extent_pages(async_extent);
1262 	if (async_chunk->blkcg_css)
1263 		kthread_associate_blkcg(NULL);
1264 	btrfs_debug(fs_info,
1265 "async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
1266 		    btrfs_root_id(root), btrfs_ino(inode), start,
1267 		    async_extent->ram_size, ret);
1268 	kfree(async_extent);
1269 }
1270 
1271 u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
1272 				     u64 num_bytes)
1273 {
1274 	struct extent_map_tree *em_tree = &inode->extent_tree;
1275 	struct extent_map *em;
1276 	u64 alloc_hint = 0;
1277 
1278 	read_lock(&em_tree->lock);
1279 	em = search_extent_mapping(em_tree, start, num_bytes);
1280 	if (em) {
1281 		/*
1282 		 * if block start isn't an actual block number then find the
1283 		 * first block in this inode and use that as a hint.  If that
1284 		 * block is also bogus then just don't worry about it.
1285 		 */
1286 		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
1287 			free_extent_map(em);
1288 			em = search_extent_mapping(em_tree, 0, 0);
1289 			if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
1290 				alloc_hint = extent_map_block_start(em);
1291 			if (em)
1292 				free_extent_map(em);
1293 		} else {
1294 			alloc_hint = extent_map_block_start(em);
1295 			free_extent_map(em);
1296 		}
1297 	}
1298 	read_unlock(&em_tree->lock);
1299 
1300 	return alloc_hint;
1301 }
1302 
1303 /*
1304  * when extent_io.c finds a delayed allocation range in the file,
1305  * the call backs end up in this code.  The basic idea is to
1306  * allocate extents on disk for the range, and create ordered data structs
1307  * in ram to track those extents.
1308  *
1309  * locked_page is the page that writepage had locked already.  We use
1310  * it to make sure we don't do extra locks or unlocks.
1311  *
1312  * When this function fails, it unlocks all pages except @locked_page.
1313  *
1314  * When this function successfully creates an inline extent, it returns 1 and
1315  * unlocks all pages including locked_page and starts I/O on them.
1316  * (In reality inline extents are limited to a single page, so locked_page is
1317  * the only page handled anyway).
1318  *
1319  * When this function succeed and creates a normal extent, the page locking
1320  * status depends on the passed in flags:
1321  *
1322  * - If @keep_locked is set, all pages are kept locked.
1323  * - Else all pages except for @locked_page are unlocked.
1324  *
1325  * When a failure happens in the second or later iteration of the
1326  * while-loop, the ordered extents created in previous iterations are kept
1327  * intact. So, the caller must clean them up by calling
1328  * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
1329  * example.
1330  */
1331 static noinline int cow_file_range(struct btrfs_inode *inode,
1332 				   struct page *locked_page, u64 start, u64 end,
1333 				   u64 *done_offset,
1334 				   bool keep_locked, bool no_inline)
1335 {
1336 	struct btrfs_root *root = inode->root;
1337 	struct btrfs_fs_info *fs_info = root->fs_info;
1338 	struct extent_state *cached = NULL;
1339 	u64 alloc_hint = 0;
1340 	u64 orig_start = start;
1341 	u64 num_bytes;
1342 	unsigned long ram_size;
1343 	u64 cur_alloc_size = 0;
1344 	u64 min_alloc_size;
1345 	u64 blocksize = fs_info->sectorsize;
1346 	struct btrfs_key ins;
1347 	struct extent_map *em;
1348 	unsigned clear_bits;
1349 	unsigned long page_ops;
1350 	bool extent_reserved = false;
1351 	int ret = 0;
1352 
1353 	if (btrfs_is_free_space_inode(inode)) {
1354 		ret = -EINVAL;
1355 		goto out_unlock;
1356 	}
1357 
1358 	num_bytes = ALIGN(end - start + 1, blocksize);
1359 	num_bytes = max(blocksize,  num_bytes);
1360 	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1361 
1362 	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1363 
1364 	if (!no_inline) {
1365 		/* lets try to make an inline extent */
1366 		ret = cow_file_range_inline(inode, locked_page, start, end, 0,
1367 					    BTRFS_COMPRESS_NONE, NULL, false);
1368 		if (ret <= 0) {
1369 			/*
1370 			 * We succeeded, return 1 so the caller knows we're done
1371 			 * with this page and already handled the IO.
1372 			 *
1373 			 * If there was an error then cow_file_range_inline() has
1374 			 * already done the cleanup.
1375 			 */
1376 			if (ret == 0)
1377 				ret = 1;
1378 			goto done;
1379 		}
1380 	}
1381 
1382 	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
1383 
1384 	/*
1385 	 * Relocation relies on the relocated extents to have exactly the same
1386 	 * size as the original extents. Normally writeback for relocation data
1387 	 * extents follows a NOCOW path because relocation preallocates the
1388 	 * extents. However, due to an operation such as scrub turning a block
1389 	 * group to RO mode, it may fallback to COW mode, so we must make sure
1390 	 * an extent allocated during COW has exactly the requested size and can
1391 	 * not be split into smaller extents, otherwise relocation breaks and
1392 	 * fails during the stage where it updates the bytenr of file extent
1393 	 * items.
1394 	 */
1395 	if (btrfs_is_data_reloc_root(root))
1396 		min_alloc_size = num_bytes;
1397 	else
1398 		min_alloc_size = fs_info->sectorsize;
1399 
1400 	while (num_bytes > 0) {
1401 		struct btrfs_ordered_extent *ordered;
1402 		struct btrfs_file_extent file_extent;
1403 
1404 		cur_alloc_size = num_bytes;
1405 		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1406 					   min_alloc_size, 0, alloc_hint,
1407 					   &ins, 1, 1);
1408 		if (ret == -EAGAIN) {
1409 			/*
1410 			 * btrfs_reserve_extent only returns -EAGAIN for zoned
1411 			 * file systems, which is an indication that there are
1412 			 * no active zones to allocate from at the moment.
1413 			 *
1414 			 * If this is the first loop iteration, wait for at
1415 			 * least one zone to finish before retrying the
1416 			 * allocation.  Otherwise ask the caller to write out
1417 			 * the already allocated blocks before coming back to
1418 			 * us, or return -ENOSPC if it can't handle retries.
1419 			 */
1420 			ASSERT(btrfs_is_zoned(fs_info));
1421 			if (start == orig_start) {
1422 				wait_on_bit_io(&inode->root->fs_info->flags,
1423 					       BTRFS_FS_NEED_ZONE_FINISH,
1424 					       TASK_UNINTERRUPTIBLE);
1425 				continue;
1426 			}
1427 			if (done_offset) {
1428 				*done_offset = start - 1;
1429 				return 0;
1430 			}
1431 			ret = -ENOSPC;
1432 		}
1433 		if (ret < 0)
1434 			goto out_unlock;
1435 		cur_alloc_size = ins.offset;
1436 		extent_reserved = true;
1437 
1438 		ram_size = ins.offset;
1439 		file_extent.disk_bytenr = ins.objectid;
1440 		file_extent.disk_num_bytes = ins.offset;
1441 		file_extent.num_bytes = ins.offset;
1442 		file_extent.ram_bytes = ins.offset;
1443 		file_extent.offset = 0;
1444 		file_extent.compression = BTRFS_COMPRESS_NONE;
1445 
1446 		lock_extent(&inode->io_tree, start, start + ram_size - 1,
1447 			    &cached);
1448 
1449 		em = btrfs_create_io_em(inode, start, &file_extent,
1450 					BTRFS_ORDERED_REGULAR);
1451 		if (IS_ERR(em)) {
1452 			unlock_extent(&inode->io_tree, start,
1453 				      start + ram_size - 1, &cached);
1454 			ret = PTR_ERR(em);
1455 			goto out_reserve;
1456 		}
1457 		free_extent_map(em);
1458 
1459 		ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
1460 						     1 << BTRFS_ORDERED_REGULAR);
1461 		if (IS_ERR(ordered)) {
1462 			unlock_extent(&inode->io_tree, start,
1463 				      start + ram_size - 1, &cached);
1464 			ret = PTR_ERR(ordered);
1465 			goto out_drop_extent_cache;
1466 		}
1467 
1468 		if (btrfs_is_data_reloc_root(root)) {
1469 			ret = btrfs_reloc_clone_csums(ordered);
1470 
1471 			/*
1472 			 * Only drop cache here, and process as normal.
1473 			 *
1474 			 * We must not allow extent_clear_unlock_delalloc()
1475 			 * at out_unlock label to free meta of this ordered
1476 			 * extent, as its meta should be freed by
1477 			 * btrfs_finish_ordered_io().
1478 			 *
1479 			 * So we must continue until @start is increased to
1480 			 * skip current ordered extent.
1481 			 */
1482 			if (ret)
1483 				btrfs_drop_extent_map_range(inode, start,
1484 							    start + ram_size - 1,
1485 							    false);
1486 		}
1487 		btrfs_put_ordered_extent(ordered);
1488 
1489 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1490 
1491 		/*
1492 		 * We're not doing compressed IO, don't unlock the first page
1493 		 * (which the caller expects to stay locked), don't clear any
1494 		 * dirty bits and don't set any writeback bits
1495 		 *
1496 		 * Do set the Ordered (Private2) bit so we know this page was
1497 		 * properly setup for writepage.
1498 		 */
1499 		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
1500 		page_ops |= PAGE_SET_ORDERED;
1501 
1502 		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
1503 					     locked_page, &cached,
1504 					     EXTENT_LOCKED | EXTENT_DELALLOC,
1505 					     page_ops);
1506 		if (num_bytes < cur_alloc_size)
1507 			num_bytes = 0;
1508 		else
1509 			num_bytes -= cur_alloc_size;
1510 		alloc_hint = ins.objectid + ins.offset;
1511 		start += cur_alloc_size;
1512 		extent_reserved = false;
1513 
1514 		/*
1515 		 * btrfs_reloc_clone_csums() error, since start is increased
1516 		 * extent_clear_unlock_delalloc() at out_unlock label won't
1517 		 * free metadata of current ordered extent, we're OK to exit.
1518 		 */
1519 		if (ret)
1520 			goto out_unlock;
1521 	}
1522 done:
1523 	if (done_offset)
1524 		*done_offset = end;
1525 	return ret;
1526 
1527 out_drop_extent_cache:
1528 	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
1529 out_reserve:
1530 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1531 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1532 out_unlock:
1533 	/*
1534 	 * Now, we have three regions to clean up:
1535 	 *
1536 	 * |-------(1)----|---(2)---|-------------(3)----------|
1537 	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
1538 	 *
1539 	 * We process each region below.
1540 	 */
1541 
1542 	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1543 		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1544 	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1545 
1546 	/*
1547 	 * For the range (1). We have already instantiated the ordered extents
1548 	 * for this region. They are cleaned up by
1549 	 * btrfs_cleanup_ordered_extents() in e.g,
1550 	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
1551 	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
1552 	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
1553 	 * function.
1554 	 *
1555 	 * However, in case of @keep_locked, we still need to unlock the pages
1556 	 * (except @locked_page) to ensure all the pages are unlocked.
1557 	 */
1558 	if (keep_locked && orig_start < start) {
1559 		if (!locked_page)
1560 			mapping_set_error(inode->vfs_inode.i_mapping, ret);
1561 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
1562 					     locked_page, NULL, 0, page_ops);
1563 	}
1564 
1565 	/*
1566 	 * At this point we're unlocked, we want to make sure we're only
1567 	 * clearing these flags under the extent lock, so lock the rest of the
1568 	 * range and clear everything up.
1569 	 */
1570 	lock_extent(&inode->io_tree, start, end, NULL);
1571 
1572 	/*
1573 	 * For the range (2). If we reserved an extent for our delalloc range
1574 	 * (or a subrange) and failed to create the respective ordered extent,
1575 	 * then it means that when we reserved the extent we decremented the
1576 	 * extent's size from the data space_info's bytes_may_use counter and
1577 	 * incremented the space_info's bytes_reserved counter by the same
1578 	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
1579 	 * to decrement again the data space_info's bytes_may_use counter,
1580 	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
1581 	 */
1582 	if (extent_reserved) {
1583 		extent_clear_unlock_delalloc(inode, start,
1584 					     start + cur_alloc_size - 1,
1585 					     locked_page, &cached,
1586 					     clear_bits,
1587 					     page_ops);
1588 		start += cur_alloc_size;
1589 	}
1590 
1591 	/*
1592 	 * For the range (3). We never touched the region. In addition to the
1593 	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
1594 	 * space_info's bytes_may_use counter, reserved in
1595 	 * btrfs_check_data_free_space().
1596 	 */
1597 	if (start < end) {
1598 		clear_bits |= EXTENT_CLEAR_DATA_RESV;
1599 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1600 					     &cached, clear_bits, page_ops);
1601 	}
1602 	return ret;
1603 }
1604 
1605 /*
1606  * Phase two of compressed writeback.  This is the ordered portion of the code,
1607  * which only gets called in the order the work was queued.  We walk all the
1608  * async extents created by compress_file_range and send them down to the disk.
1609  *
1610  * If called with @do_free == true then it'll try to finish the work and free
1611  * the work struct eventually.
1612  */
1613 static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
1614 {
1615 	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1616 						     work);
1617 	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1618 	struct async_extent *async_extent;
1619 	unsigned long nr_pages;
1620 	u64 alloc_hint = 0;
1621 
1622 	if (do_free) {
1623 		struct async_cow *async_cow;
1624 
1625 		btrfs_add_delayed_iput(async_chunk->inode);
1626 		if (async_chunk->blkcg_css)
1627 			css_put(async_chunk->blkcg_css);
1628 
1629 		async_cow = async_chunk->async_cow;
1630 		if (atomic_dec_and_test(&async_cow->num_chunks))
1631 			kvfree(async_cow);
1632 		return;
1633 	}
1634 
1635 	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1636 		PAGE_SHIFT;
1637 
1638 	while (!list_empty(&async_chunk->extents)) {
1639 		async_extent = list_entry(async_chunk->extents.next,
1640 					  struct async_extent, list);
1641 		list_del(&async_extent->list);
1642 		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
1643 	}
1644 
1645 	/* atomic_sub_return implies a barrier */
1646 	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1647 	    5 * SZ_1M)
1648 		cond_wake_up_nomb(&fs_info->async_submit_wait);
1649 }
1650 
1651 static bool run_delalloc_compressed(struct btrfs_inode *inode,
1652 				    struct page *locked_page, u64 start,
1653 				    u64 end, struct writeback_control *wbc)
1654 {
1655 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1656 	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1657 	struct async_cow *ctx;
1658 	struct async_chunk *async_chunk;
1659 	unsigned long nr_pages;
1660 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1661 	int i;
1662 	unsigned nofs_flag;
1663 	const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1664 
1665 	nofs_flag = memalloc_nofs_save();
1666 	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1667 	memalloc_nofs_restore(nofs_flag);
1668 	if (!ctx)
1669 		return false;
1670 
1671 	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
1672 
1673 	async_chunk = ctx->chunks;
1674 	atomic_set(&ctx->num_chunks, num_chunks);
1675 
1676 	for (i = 0; i < num_chunks; i++) {
1677 		u64 cur_end = min(end, start + SZ_512K - 1);
1678 
1679 		/*
1680 		 * igrab is called higher up in the call chain, take only the
1681 		 * lightweight reference for the callback lifetime
1682 		 */
1683 		ihold(&inode->vfs_inode);
1684 		async_chunk[i].async_cow = ctx;
1685 		async_chunk[i].inode = inode;
1686 		async_chunk[i].start = start;
1687 		async_chunk[i].end = cur_end;
1688 		async_chunk[i].write_flags = write_flags;
1689 		INIT_LIST_HEAD(&async_chunk[i].extents);
1690 
1691 		/*
1692 		 * The locked_page comes all the way from writepage and its
1693 		 * the original page we were actually given.  As we spread
1694 		 * this large delalloc region across multiple async_chunk
1695 		 * structs, only the first struct needs a pointer to locked_page
1696 		 *
1697 		 * This way we don't need racey decisions about who is supposed
1698 		 * to unlock it.
1699 		 */
1700 		if (locked_page) {
1701 			/*
1702 			 * Depending on the compressibility, the pages might or
1703 			 * might not go through async.  We want all of them to
1704 			 * be accounted against wbc once.  Let's do it here
1705 			 * before the paths diverge.  wbc accounting is used
1706 			 * only for foreign writeback detection and doesn't
1707 			 * need full accuracy.  Just account the whole thing
1708 			 * against the first page.
1709 			 */
1710 			wbc_account_cgroup_owner(wbc, locked_page,
1711 						 cur_end - start);
1712 			async_chunk[i].locked_page = locked_page;
1713 			locked_page = NULL;
1714 		} else {
1715 			async_chunk[i].locked_page = NULL;
1716 		}
1717 
1718 		if (blkcg_css != blkcg_root_css) {
1719 			css_get(blkcg_css);
1720 			async_chunk[i].blkcg_css = blkcg_css;
1721 			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
1722 		} else {
1723 			async_chunk[i].blkcg_css = NULL;
1724 		}
1725 
1726 		btrfs_init_work(&async_chunk[i].work, compress_file_range,
1727 				submit_compressed_extents);
1728 
1729 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1730 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1731 
1732 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1733 
1734 		start = cur_end + 1;
1735 	}
1736 	return true;
1737 }
1738 
1739 /*
1740  * Run the delalloc range from start to end, and write back any dirty pages
1741  * covered by the range.
1742  */
1743 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
1744 				     struct page *locked_page, u64 start,
1745 				     u64 end, struct writeback_control *wbc,
1746 				     bool pages_dirty)
1747 {
1748 	u64 done_offset = end;
1749 	int ret;
1750 
1751 	while (start <= end) {
1752 		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
1753 				     true, false);
1754 		if (ret)
1755 			return ret;
1756 		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
1757 					  done_offset, wbc, pages_dirty);
1758 		start = done_offset + 1;
1759 	}
1760 
1761 	return 1;
1762 }
1763 
1764 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1765 			   const u64 start, const u64 end)
1766 {
1767 	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1768 	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1769 	const u64 range_bytes = end + 1 - start;
1770 	struct extent_io_tree *io_tree = &inode->io_tree;
1771 	struct extent_state *cached_state = NULL;
1772 	u64 range_start = start;
1773 	u64 count;
1774 	int ret;
1775 
1776 	/*
1777 	 * If EXTENT_NORESERVE is set it means that when the buffered write was
1778 	 * made we had not enough available data space and therefore we did not
1779 	 * reserve data space for it, since we though we could do NOCOW for the
1780 	 * respective file range (either there is prealloc extent or the inode
1781 	 * has the NOCOW bit set).
1782 	 *
1783 	 * However when we need to fallback to COW mode (because for example the
1784 	 * block group for the corresponding extent was turned to RO mode by a
1785 	 * scrub or relocation) we need to do the following:
1786 	 *
1787 	 * 1) We increment the bytes_may_use counter of the data space info.
1788 	 *    If COW succeeds, it allocates a new data extent and after doing
1789 	 *    that it decrements the space info's bytes_may_use counter and
1790 	 *    increments its bytes_reserved counter by the same amount (we do
1791 	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1792 	 *    bytes_may_use counter to compensate (when space is reserved at
1793 	 *    buffered write time, the bytes_may_use counter is incremented);
1794 	 *
1795 	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1796 	 *    that if the COW path fails for any reason, it decrements (through
1797 	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1798 	 *    data space info, which we incremented in the step above.
1799 	 *
1800 	 * If we need to fallback to cow and the inode corresponds to a free
1801 	 * space cache inode or an inode of the data relocation tree, we must
1802 	 * also increment bytes_may_use of the data space_info for the same
1803 	 * reason. Space caches and relocated data extents always get a prealloc
1804 	 * extent for them, however scrub or balance may have set the block
1805 	 * group that contains that extent to RO mode and therefore force COW
1806 	 * when starting writeback.
1807 	 */
1808 	lock_extent(io_tree, start, end, &cached_state);
1809 	count = count_range_bits(io_tree, &range_start, end, range_bytes,
1810 				 EXTENT_NORESERVE, 0, NULL);
1811 	if (count > 0 || is_space_ino || is_reloc_ino) {
1812 		u64 bytes = count;
1813 		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1814 		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1815 
1816 		if (is_space_ino || is_reloc_ino)
1817 			bytes = range_bytes;
1818 
1819 		spin_lock(&sinfo->lock);
1820 		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
1821 		spin_unlock(&sinfo->lock);
1822 
1823 		if (count > 0)
1824 			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1825 					 NULL);
1826 	}
1827 	unlock_extent(io_tree, start, end, &cached_state);
1828 
1829 	/*
1830 	 * Don't try to create inline extents, as a mix of inline extent that
1831 	 * is written out and unlocked directly and a normal NOCOW extent
1832 	 * doesn't work.
1833 	 */
1834 	ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
1835 	ASSERT(ret != 1);
1836 	return ret;
1837 }
1838 
1839 struct can_nocow_file_extent_args {
1840 	/* Input fields. */
1841 
1842 	/* Start file offset of the range we want to NOCOW. */
1843 	u64 start;
1844 	/* End file offset (inclusive) of the range we want to NOCOW. */
1845 	u64 end;
1846 	bool writeback_path;
1847 	bool strict;
1848 	/*
1849 	 * Free the path passed to can_nocow_file_extent() once it's not needed
1850 	 * anymore.
1851 	 */
1852 	bool free_path;
1853 
1854 	/*
1855 	 * Output fields. Only set when can_nocow_file_extent() returns 1.
1856 	 * The expected file extent for the NOCOW write.
1857 	 */
1858 	struct btrfs_file_extent file_extent;
1859 };
1860 
1861 /*
1862  * Check if we can NOCOW the file extent that the path points to.
1863  * This function may return with the path released, so the caller should check
1864  * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1865  *
1866  * Returns: < 0 on error
1867  *            0 if we can not NOCOW
1868  *            1 if we can NOCOW
1869  */
1870 static int can_nocow_file_extent(struct btrfs_path *path,
1871 				 struct btrfs_key *key,
1872 				 struct btrfs_inode *inode,
1873 				 struct can_nocow_file_extent_args *args)
1874 {
1875 	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1876 	struct extent_buffer *leaf = path->nodes[0];
1877 	struct btrfs_root *root = inode->root;
1878 	struct btrfs_file_extent_item *fi;
1879 	struct btrfs_root *csum_root;
1880 	u64 io_start;
1881 	u64 extent_end;
1882 	u8 extent_type;
1883 	int can_nocow = 0;
1884 	int ret = 0;
1885 	bool nowait = path->nowait;
1886 
1887 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1888 	extent_type = btrfs_file_extent_type(leaf, fi);
1889 
1890 	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1891 		goto out;
1892 
1893 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1894 	    extent_type == BTRFS_FILE_EXTENT_REG)
1895 		goto out;
1896 
1897 	/*
1898 	 * If the extent was created before the generation where the last snapshot
1899 	 * for its subvolume was created, then this implies the extent is shared,
1900 	 * hence we must COW.
1901 	 */
1902 	if (!args->strict &&
1903 	    btrfs_file_extent_generation(leaf, fi) <=
1904 	    btrfs_root_last_snapshot(&root->root_item))
1905 		goto out;
1906 
1907 	/* An explicit hole, must COW. */
1908 	if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
1909 		goto out;
1910 
1911 	/* Compressed/encrypted/encoded extents must be COWed. */
1912 	if (btrfs_file_extent_compression(leaf, fi) ||
1913 	    btrfs_file_extent_encryption(leaf, fi) ||
1914 	    btrfs_file_extent_other_encoding(leaf, fi))
1915 		goto out;
1916 
1917 	extent_end = btrfs_file_extent_end(path);
1918 
1919 	args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1920 	args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1921 	args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1922 	args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
1923 	args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);
1924 
1925 	/*
1926 	 * The following checks can be expensive, as they need to take other
1927 	 * locks and do btree or rbtree searches, so release the path to avoid
1928 	 * blocking other tasks for too long.
1929 	 */
1930 	btrfs_release_path(path);
1931 
1932 	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
1933 				    key->offset - args->file_extent.offset,
1934 				    args->file_extent.disk_bytenr, args->strict, path);
1935 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1936 	if (ret != 0)
1937 		goto out;
1938 
1939 	if (args->free_path) {
1940 		/*
1941 		 * We don't need the path anymore, plus through the
1942 		 * btrfs_lookup_csums_list() call below we will end up allocating
1943 		 * another path. So free the path to avoid unnecessary extra
1944 		 * memory usage.
1945 		 */
1946 		btrfs_free_path(path);
1947 		path = NULL;
1948 	}
1949 
1950 	/* If there are pending snapshots for this root, we must COW. */
1951 	if (args->writeback_path && !is_freespace_inode &&
1952 	    atomic_read(&root->snapshot_force_cow))
1953 		goto out;
1954 
1955 	args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
1956 	args->file_extent.offset += args->start - key->offset;
1957 	io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
1958 
1959 	/*
1960 	 * Force COW if csums exist in the range. This ensures that csums for a
1961 	 * given extent are either valid or do not exist.
1962 	 */
1963 
1964 	csum_root = btrfs_csum_root(root->fs_info, io_start);
1965 	ret = btrfs_lookup_csums_list(csum_root, io_start,
1966 				      io_start + args->file_extent.num_bytes - 1,
1967 				      NULL, nowait);
1968 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1969 	if (ret != 0)
1970 		goto out;
1971 
1972 	can_nocow = 1;
1973  out:
1974 	if (args->free_path && path)
1975 		btrfs_free_path(path);
1976 
1977 	return ret < 0 ? ret : can_nocow;
1978 }
1979 
1980 /*
1981  * when nowcow writeback call back.  This checks for snapshots or COW copies
1982  * of the extents that exist in the file, and COWs the file as required.
1983  *
1984  * If no cow copies or snapshots exist, we write directly to the existing
1985  * blocks on disk
1986  */
1987 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1988 				       struct page *locked_page,
1989 				       const u64 start, const u64 end)
1990 {
1991 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1992 	struct btrfs_root *root = inode->root;
1993 	struct btrfs_path *path;
1994 	u64 cow_start = (u64)-1;
1995 	u64 cur_offset = start;
1996 	int ret;
1997 	bool check_prev = true;
1998 	u64 ino = btrfs_ino(inode);
1999 	struct can_nocow_file_extent_args nocow_args = { 0 };
2000 
2001 	/*
2002 	 * Normally on a zoned device we're only doing COW writes, but in case
2003 	 * of relocation on a zoned filesystem serializes I/O so that we're only
2004 	 * writing sequentially and can end up here as well.
2005 	 */
2006 	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
2007 
2008 	path = btrfs_alloc_path();
2009 	if (!path) {
2010 		ret = -ENOMEM;
2011 		goto error;
2012 	}
2013 
2014 	nocow_args.end = end;
2015 	nocow_args.writeback_path = true;
2016 
2017 	while (cur_offset <= end) {
2018 		struct btrfs_block_group *nocow_bg = NULL;
2019 		struct btrfs_ordered_extent *ordered;
2020 		struct btrfs_key found_key;
2021 		struct btrfs_file_extent_item *fi;
2022 		struct extent_buffer *leaf;
2023 		struct extent_state *cached_state = NULL;
2024 		u64 extent_end;
2025 		u64 nocow_end;
2026 		int extent_type;
2027 		bool is_prealloc;
2028 
2029 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2030 					       cur_offset, 0);
2031 		if (ret < 0)
2032 			goto error;
2033 
2034 		/*
2035 		 * If there is no extent for our range when doing the initial
2036 		 * search, then go back to the previous slot as it will be the
2037 		 * one containing the search offset
2038 		 */
2039 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
2040 			leaf = path->nodes[0];
2041 			btrfs_item_key_to_cpu(leaf, &found_key,
2042 					      path->slots[0] - 1);
2043 			if (found_key.objectid == ino &&
2044 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
2045 				path->slots[0]--;
2046 		}
2047 		check_prev = false;
2048 next_slot:
2049 		/* Go to next leaf if we have exhausted the current one */
2050 		leaf = path->nodes[0];
2051 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2052 			ret = btrfs_next_leaf(root, path);
2053 			if (ret < 0)
2054 				goto error;
2055 			if (ret > 0)
2056 				break;
2057 			leaf = path->nodes[0];
2058 		}
2059 
2060 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2061 
2062 		/* Didn't find anything for our INO */
2063 		if (found_key.objectid > ino)
2064 			break;
2065 		/*
2066 		 * Keep searching until we find an EXTENT_ITEM or there are no
2067 		 * more extents for this inode
2068 		 */
2069 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
2070 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
2071 			path->slots[0]++;
2072 			goto next_slot;
2073 		}
2074 
2075 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
2076 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2077 		    found_key.offset > end)
2078 			break;
2079 
2080 		/*
2081 		 * If the found extent starts after requested offset, then
2082 		 * adjust extent_end to be right before this extent begins
2083 		 */
2084 		if (found_key.offset > cur_offset) {
2085 			extent_end = found_key.offset;
2086 			extent_type = 0;
2087 			goto must_cow;
2088 		}
2089 
2090 		/*
2091 		 * Found extent which begins before our range and potentially
2092 		 * intersect it
2093 		 */
2094 		fi = btrfs_item_ptr(leaf, path->slots[0],
2095 				    struct btrfs_file_extent_item);
2096 		extent_type = btrfs_file_extent_type(leaf, fi);
2097 		/* If this is triggered then we have a memory corruption. */
2098 		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2099 		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2100 			ret = -EUCLEAN;
2101 			goto error;
2102 		}
2103 		extent_end = btrfs_file_extent_end(path);
2104 
2105 		/*
2106 		 * If the extent we got ends before our current offset, skip to
2107 		 * the next extent.
2108 		 */
2109 		if (extent_end <= cur_offset) {
2110 			path->slots[0]++;
2111 			goto next_slot;
2112 		}
2113 
2114 		nocow_args.start = cur_offset;
2115 		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2116 		if (ret < 0)
2117 			goto error;
2118 		if (ret == 0)
2119 			goto must_cow;
2120 
2121 		ret = 0;
2122 		nocow_bg = btrfs_inc_nocow_writers(fs_info,
2123 				nocow_args.file_extent.disk_bytenr +
2124 				nocow_args.file_extent.offset);
2125 		if (!nocow_bg) {
2126 must_cow:
2127 			/*
2128 			 * If we can't perform NOCOW writeback for the range,
2129 			 * then record the beginning of the range that needs to
2130 			 * be COWed.  It will be written out before the next
2131 			 * NOCOW range if we find one, or when exiting this
2132 			 * loop.
2133 			 */
2134 			if (cow_start == (u64)-1)
2135 				cow_start = cur_offset;
2136 			cur_offset = extent_end;
2137 			if (cur_offset > end)
2138 				break;
2139 			if (!path->nodes[0])
2140 				continue;
2141 			path->slots[0]++;
2142 			goto next_slot;
2143 		}
2144 
2145 		/*
2146 		 * COW range from cow_start to found_key.offset - 1. As the key
2147 		 * will contain the beginning of the first extent that can be
2148 		 * NOCOW, following one which needs to be COW'ed
2149 		 */
2150 		if (cow_start != (u64)-1) {
2151 			ret = fallback_to_cow(inode, locked_page,
2152 					      cow_start, found_key.offset - 1);
2153 			cow_start = (u64)-1;
2154 			if (ret) {
2155 				btrfs_dec_nocow_writers(nocow_bg);
2156 				goto error;
2157 			}
2158 		}
2159 
2160 		nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
2161 		lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state);
2162 
2163 		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
2164 		if (is_prealloc) {
2165 			struct extent_map *em;
2166 
2167 			em = btrfs_create_io_em(inode, cur_offset,
2168 						&nocow_args.file_extent,
2169 						BTRFS_ORDERED_PREALLOC);
2170 			if (IS_ERR(em)) {
2171 				unlock_extent(&inode->io_tree, cur_offset,
2172 					      nocow_end, &cached_state);
2173 				btrfs_dec_nocow_writers(nocow_bg);
2174 				ret = PTR_ERR(em);
2175 				goto error;
2176 			}
2177 			free_extent_map(em);
2178 		}
2179 
2180 		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
2181 				&nocow_args.file_extent,
2182 				is_prealloc
2183 				? (1 << BTRFS_ORDERED_PREALLOC)
2184 				: (1 << BTRFS_ORDERED_NOCOW));
2185 		btrfs_dec_nocow_writers(nocow_bg);
2186 		if (IS_ERR(ordered)) {
2187 			if (is_prealloc) {
2188 				btrfs_drop_extent_map_range(inode, cur_offset,
2189 							    nocow_end, false);
2190 			}
2191 			unlock_extent(&inode->io_tree, cur_offset,
2192 				      nocow_end, &cached_state);
2193 			ret = PTR_ERR(ordered);
2194 			goto error;
2195 		}
2196 
2197 		if (btrfs_is_data_reloc_root(root))
2198 			/*
2199 			 * Error handled later, as we must prevent
2200 			 * extent_clear_unlock_delalloc() in error handler
2201 			 * from freeing metadata of created ordered extent.
2202 			 */
2203 			ret = btrfs_reloc_clone_csums(ordered);
2204 		btrfs_put_ordered_extent(ordered);
2205 
2206 		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2207 					     locked_page, &cached_state,
2208 					     EXTENT_LOCKED | EXTENT_DELALLOC |
2209 					     EXTENT_CLEAR_DATA_RESV,
2210 					     PAGE_UNLOCK | PAGE_SET_ORDERED);
2211 
2212 		cur_offset = extent_end;
2213 
2214 		/*
2215 		 * btrfs_reloc_clone_csums() error, now we're OK to call error
2216 		 * handler, as metadata for created ordered extent will only
2217 		 * be freed by btrfs_finish_ordered_io().
2218 		 */
2219 		if (ret)
2220 			goto error;
2221 	}
2222 	btrfs_release_path(path);
2223 
2224 	if (cur_offset <= end && cow_start == (u64)-1)
2225 		cow_start = cur_offset;
2226 
2227 	if (cow_start != (u64)-1) {
2228 		cur_offset = end;
2229 		ret = fallback_to_cow(inode, locked_page, cow_start, end);
2230 		cow_start = (u64)-1;
2231 		if (ret)
2232 			goto error;
2233 	}
2234 
2235 	btrfs_free_path(path);
2236 	return 0;
2237 
2238 error:
2239 	/*
2240 	 * If an error happened while a COW region is outstanding, cur_offset
2241 	 * needs to be reset to cow_start to ensure the COW region is unlocked
2242 	 * as well.
2243 	 */
2244 	if (cow_start != (u64)-1)
2245 		cur_offset = cow_start;
2246 
2247 	/*
2248 	 * We need to lock the extent here because we're clearing DELALLOC and
2249 	 * we're not locked at this point.
2250 	 */
2251 	if (cur_offset < end) {
2252 		struct extent_state *cached = NULL;
2253 
2254 		lock_extent(&inode->io_tree, cur_offset, end, &cached);
2255 		extent_clear_unlock_delalloc(inode, cur_offset, end,
2256 					     locked_page, &cached,
2257 					     EXTENT_LOCKED | EXTENT_DELALLOC |
2258 					     EXTENT_DEFRAG |
2259 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2260 					     PAGE_START_WRITEBACK |
2261 					     PAGE_END_WRITEBACK);
2262 	}
2263 	btrfs_free_path(path);
2264 	return ret;
2265 }
2266 
2267 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2268 {
2269 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2270 		if (inode->defrag_bytes &&
2271 		    test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
2272 			return false;
2273 		return true;
2274 	}
2275 	return false;
2276 }
2277 
2278 /*
2279  * Function to process delayed allocation (create CoW) for ranges which are
2280  * being touched for the first time.
2281  */
2282 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2283 			     u64 start, u64 end, struct writeback_control *wbc)
2284 {
2285 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2286 	int ret;
2287 
2288 	/*
2289 	 * The range must cover part of the @locked_page, or a return of 1
2290 	 * can confuse the caller.
2291 	 */
2292 	ASSERT(!(end <= page_offset(locked_page) ||
2293 		 start >= page_offset(locked_page) + PAGE_SIZE));
2294 
2295 	if (should_nocow(inode, start, end)) {
2296 		ret = run_delalloc_nocow(inode, locked_page, start, end);
2297 		goto out;
2298 	}
2299 
2300 	if (btrfs_inode_can_compress(inode) &&
2301 	    inode_need_compress(inode, start, end) &&
2302 	    run_delalloc_compressed(inode, locked_page, start, end, wbc))
2303 		return 1;
2304 
2305 	if (zoned)
2306 		ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
2307 				       true);
2308 	else
2309 		ret = cow_file_range(inode, locked_page, start, end, NULL,
2310 				     false, false);
2311 
2312 out:
2313 	if (ret < 0)
2314 		btrfs_cleanup_ordered_extents(inode, locked_page, start,
2315 					      end - start + 1);
2316 	return ret;
2317 }
2318 
2319 void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2320 				 struct extent_state *orig, u64 split)
2321 {
2322 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2323 	u64 size;
2324 
2325 	lockdep_assert_held(&inode->io_tree.lock);
2326 
2327 	/* not delalloc, ignore it */
2328 	if (!(orig->state & EXTENT_DELALLOC))
2329 		return;
2330 
2331 	size = orig->end - orig->start + 1;
2332 	if (size > fs_info->max_extent_size) {
2333 		u32 num_extents;
2334 		u64 new_size;
2335 
2336 		/*
2337 		 * See the explanation in btrfs_merge_delalloc_extent, the same
2338 		 * applies here, just in reverse.
2339 		 */
2340 		new_size = orig->end - split + 1;
2341 		num_extents = count_max_extents(fs_info, new_size);
2342 		new_size = split - orig->start;
2343 		num_extents += count_max_extents(fs_info, new_size);
2344 		if (count_max_extents(fs_info, size) >= num_extents)
2345 			return;
2346 	}
2347 
2348 	spin_lock(&inode->lock);
2349 	btrfs_mod_outstanding_extents(inode, 1);
2350 	spin_unlock(&inode->lock);
2351 }
2352 
2353 /*
2354  * Handle merged delayed allocation extents so we can keep track of new extents
2355  * that are just merged onto old extents, such as when we are doing sequential
2356  * writes, so we can properly account for the metadata space we'll need.
2357  */
2358 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2359 				 struct extent_state *other)
2360 {
2361 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2362 	u64 new_size, old_size;
2363 	u32 num_extents;
2364 
2365 	lockdep_assert_held(&inode->io_tree.lock);
2366 
2367 	/* not delalloc, ignore it */
2368 	if (!(other->state & EXTENT_DELALLOC))
2369 		return;
2370 
2371 	if (new->start > other->start)
2372 		new_size = new->end - other->start + 1;
2373 	else
2374 		new_size = other->end - new->start + 1;
2375 
2376 	/* we're not bigger than the max, unreserve the space and go */
2377 	if (new_size <= fs_info->max_extent_size) {
2378 		spin_lock(&inode->lock);
2379 		btrfs_mod_outstanding_extents(inode, -1);
2380 		spin_unlock(&inode->lock);
2381 		return;
2382 	}
2383 
2384 	/*
2385 	 * We have to add up either side to figure out how many extents were
2386 	 * accounted for before we merged into one big extent.  If the number of
2387 	 * extents we accounted for is <= the amount we need for the new range
2388 	 * then we can return, otherwise drop.  Think of it like this
2389 	 *
2390 	 * [ 4k][MAX_SIZE]
2391 	 *
2392 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2393 	 * need 2 outstanding extents, on one side we have 1 and the other side
2394 	 * we have 1 so they are == and we can return.  But in this case
2395 	 *
2396 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2397 	 *
2398 	 * Each range on their own accounts for 2 extents, but merged together
2399 	 * they are only 3 extents worth of accounting, so we need to drop in
2400 	 * this case.
2401 	 */
2402 	old_size = other->end - other->start + 1;
2403 	num_extents = count_max_extents(fs_info, old_size);
2404 	old_size = new->end - new->start + 1;
2405 	num_extents += count_max_extents(fs_info, old_size);
2406 	if (count_max_extents(fs_info, new_size) >= num_extents)
2407 		return;
2408 
2409 	spin_lock(&inode->lock);
2410 	btrfs_mod_outstanding_extents(inode, -1);
2411 	spin_unlock(&inode->lock);
2412 }
2413 
2414 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
2415 {
2416 	struct btrfs_root *root = inode->root;
2417 	struct btrfs_fs_info *fs_info = root->fs_info;
2418 
2419 	spin_lock(&root->delalloc_lock);
2420 	ASSERT(list_empty(&inode->delalloc_inodes));
2421 	list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2422 	root->nr_delalloc_inodes++;
2423 	if (root->nr_delalloc_inodes == 1) {
2424 		spin_lock(&fs_info->delalloc_root_lock);
2425 		ASSERT(list_empty(&root->delalloc_root));
2426 		list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
2427 		spin_unlock(&fs_info->delalloc_root_lock);
2428 	}
2429 	spin_unlock(&root->delalloc_lock);
2430 }
2431 
2432 void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
2433 {
2434 	struct btrfs_root *root = inode->root;
2435 	struct btrfs_fs_info *fs_info = root->fs_info;
2436 
2437 	lockdep_assert_held(&root->delalloc_lock);
2438 
2439 	/*
2440 	 * We may be called after the inode was already deleted from the list,
2441 	 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
2442 	 * and then later through btrfs_clear_delalloc_extent() while the inode
2443 	 * still has ->delalloc_bytes > 0.
2444 	 */
2445 	if (!list_empty(&inode->delalloc_inodes)) {
2446 		list_del_init(&inode->delalloc_inodes);
2447 		root->nr_delalloc_inodes--;
2448 		if (!root->nr_delalloc_inodes) {
2449 			ASSERT(list_empty(&root->delalloc_inodes));
2450 			spin_lock(&fs_info->delalloc_root_lock);
2451 			ASSERT(!list_empty(&root->delalloc_root));
2452 			list_del_init(&root->delalloc_root);
2453 			spin_unlock(&fs_info->delalloc_root_lock);
2454 		}
2455 	}
2456 }
2457 
2458 /*
2459  * Properly track delayed allocation bytes in the inode and to maintain the
2460  * list of inodes that have pending delalloc work to be done.
2461  */
2462 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2463 			       u32 bits)
2464 {
2465 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2466 
2467 	lockdep_assert_held(&inode->io_tree.lock);
2468 
2469 	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2470 		WARN_ON(1);
2471 	/*
2472 	 * set_bit and clear bit hooks normally require _irqsave/restore
2473 	 * but in this case, we are only testing for the DELALLOC
2474 	 * bit, which is only set or cleared with irqs on
2475 	 */
2476 	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2477 		u64 len = state->end + 1 - state->start;
2478 		u64 prev_delalloc_bytes;
2479 		u32 num_extents = count_max_extents(fs_info, len);
2480 
2481 		spin_lock(&inode->lock);
2482 		btrfs_mod_outstanding_extents(inode, num_extents);
2483 		spin_unlock(&inode->lock);
2484 
2485 		/* For sanity tests */
2486 		if (btrfs_is_testing(fs_info))
2487 			return;
2488 
2489 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2490 					 fs_info->delalloc_batch);
2491 		spin_lock(&inode->lock);
2492 		prev_delalloc_bytes = inode->delalloc_bytes;
2493 		inode->delalloc_bytes += len;
2494 		if (bits & EXTENT_DEFRAG)
2495 			inode->defrag_bytes += len;
2496 		spin_unlock(&inode->lock);
2497 
2498 		/*
2499 		 * We don't need to be under the protection of the inode's lock,
2500 		 * because we are called while holding the inode's io_tree lock
2501 		 * and are therefore protected against concurrent calls of this
2502 		 * function and btrfs_clear_delalloc_extent().
2503 		 */
2504 		if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
2505 			btrfs_add_delalloc_inode(inode);
2506 	}
2507 
2508 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2509 	    (bits & EXTENT_DELALLOC_NEW)) {
2510 		spin_lock(&inode->lock);
2511 		inode->new_delalloc_bytes += state->end + 1 - state->start;
2512 		spin_unlock(&inode->lock);
2513 	}
2514 }
2515 
2516 /*
2517  * Once a range is no longer delalloc this function ensures that proper
2518  * accounting happens.
2519  */
2520 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2521 				 struct extent_state *state, u32 bits)
2522 {
2523 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2524 	u64 len = state->end + 1 - state->start;
2525 	u32 num_extents = count_max_extents(fs_info, len);
2526 
2527 	lockdep_assert_held(&inode->io_tree.lock);
2528 
2529 	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2530 		spin_lock(&inode->lock);
2531 		inode->defrag_bytes -= len;
2532 		spin_unlock(&inode->lock);
2533 	}
2534 
2535 	/*
2536 	 * set_bit and clear bit hooks normally require _irqsave/restore
2537 	 * but in this case, we are only testing for the DELALLOC
2538 	 * bit, which is only set or cleared with irqs on
2539 	 */
2540 	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2541 		struct btrfs_root *root = inode->root;
2542 		u64 new_delalloc_bytes;
2543 
2544 		spin_lock(&inode->lock);
2545 		btrfs_mod_outstanding_extents(inode, -num_extents);
2546 		spin_unlock(&inode->lock);
2547 
2548 		/*
2549 		 * We don't reserve metadata space for space cache inodes so we
2550 		 * don't need to call delalloc_release_metadata if there is an
2551 		 * error.
2552 		 */
2553 		if (bits & EXTENT_CLEAR_META_RESV &&
2554 		    root != fs_info->tree_root)
2555 			btrfs_delalloc_release_metadata(inode, len, true);
2556 
2557 		/* For sanity tests. */
2558 		if (btrfs_is_testing(fs_info))
2559 			return;
2560 
2561 		if (!btrfs_is_data_reloc_root(root) &&
2562 		    !btrfs_is_free_space_inode(inode) &&
2563 		    !(state->state & EXTENT_NORESERVE) &&
2564 		    (bits & EXTENT_CLEAR_DATA_RESV))
2565 			btrfs_free_reserved_data_space_noquota(fs_info, len);
2566 
2567 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2568 					 fs_info->delalloc_batch);
2569 		spin_lock(&inode->lock);
2570 		inode->delalloc_bytes -= len;
2571 		new_delalloc_bytes = inode->delalloc_bytes;
2572 		spin_unlock(&inode->lock);
2573 
2574 		/*
2575 		 * We don't need to be under the protection of the inode's lock,
2576 		 * because we are called while holding the inode's io_tree lock
2577 		 * and are therefore protected against concurrent calls of this
2578 		 * function and btrfs_set_delalloc_extent().
2579 		 */
2580 		if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
2581 			spin_lock(&root->delalloc_lock);
2582 			btrfs_del_delalloc_inode(inode);
2583 			spin_unlock(&root->delalloc_lock);
2584 		}
2585 	}
2586 
2587 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2588 	    (bits & EXTENT_DELALLOC_NEW)) {
2589 		spin_lock(&inode->lock);
2590 		ASSERT(inode->new_delalloc_bytes >= len);
2591 		inode->new_delalloc_bytes -= len;
2592 		if (bits & EXTENT_ADD_INODE_BYTES)
2593 			inode_add_bytes(&inode->vfs_inode, len);
2594 		spin_unlock(&inode->lock);
2595 	}
2596 }
2597 
2598 /*
2599  * given a list of ordered sums record them in the inode.  This happens
2600  * at IO completion time based on sums calculated at bio submission time.
2601  */
2602 static int add_pending_csums(struct btrfs_trans_handle *trans,
2603 			     struct list_head *list)
2604 {
2605 	struct btrfs_ordered_sum *sum;
2606 	struct btrfs_root *csum_root = NULL;
2607 	int ret;
2608 
2609 	list_for_each_entry(sum, list, list) {
2610 		trans->adding_csums = true;
2611 		if (!csum_root)
2612 			csum_root = btrfs_csum_root(trans->fs_info,
2613 						    sum->logical);
2614 		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2615 		trans->adding_csums = false;
2616 		if (ret)
2617 			return ret;
2618 	}
2619 	return 0;
2620 }
2621 
2622 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2623 					 const u64 start,
2624 					 const u64 len,
2625 					 struct extent_state **cached_state)
2626 {
2627 	u64 search_start = start;
2628 	const u64 end = start + len - 1;
2629 
2630 	while (search_start < end) {
2631 		const u64 search_len = end - search_start + 1;
2632 		struct extent_map *em;
2633 		u64 em_len;
2634 		int ret = 0;
2635 
2636 		em = btrfs_get_extent(inode, NULL, search_start, search_len);
2637 		if (IS_ERR(em))
2638 			return PTR_ERR(em);
2639 
2640 		if (em->disk_bytenr != EXTENT_MAP_HOLE)
2641 			goto next;
2642 
2643 		em_len = em->len;
2644 		if (em->start < search_start)
2645 			em_len -= search_start - em->start;
2646 		if (em_len > search_len)
2647 			em_len = search_len;
2648 
2649 		ret = set_extent_bit(&inode->io_tree, search_start,
2650 				     search_start + em_len - 1,
2651 				     EXTENT_DELALLOC_NEW, cached_state);
2652 next:
2653 		search_start = extent_map_end(em);
2654 		free_extent_map(em);
2655 		if (ret)
2656 			return ret;
2657 	}
2658 	return 0;
2659 }
2660 
2661 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2662 			      unsigned int extra_bits,
2663 			      struct extent_state **cached_state)
2664 {
2665 	WARN_ON(PAGE_ALIGNED(end));
2666 
2667 	if (start >= i_size_read(&inode->vfs_inode) &&
2668 	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2669 		/*
2670 		 * There can't be any extents following eof in this case so just
2671 		 * set the delalloc new bit for the range directly.
2672 		 */
2673 		extra_bits |= EXTENT_DELALLOC_NEW;
2674 	} else {
2675 		int ret;
2676 
2677 		ret = btrfs_find_new_delalloc_bytes(inode, start,
2678 						    end + 1 - start,
2679 						    cached_state);
2680 		if (ret)
2681 			return ret;
2682 	}
2683 
2684 	return set_extent_bit(&inode->io_tree, start, end,
2685 			      EXTENT_DELALLOC | extra_bits, cached_state);
2686 }
2687 
2688 /* see btrfs_writepage_start_hook for details on why this is required */
2689 struct btrfs_writepage_fixup {
2690 	struct page *page;
2691 	struct btrfs_inode *inode;
2692 	struct btrfs_work work;
2693 };
2694 
2695 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2696 {
2697 	struct btrfs_writepage_fixup *fixup =
2698 		container_of(work, struct btrfs_writepage_fixup, work);
2699 	struct btrfs_ordered_extent *ordered;
2700 	struct extent_state *cached_state = NULL;
2701 	struct extent_changeset *data_reserved = NULL;
2702 	struct page *page = fixup->page;
2703 	struct btrfs_inode *inode = fixup->inode;
2704 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2705 	u64 page_start = page_offset(page);
2706 	u64 page_end = page_offset(page) + PAGE_SIZE - 1;
2707 	int ret = 0;
2708 	bool free_delalloc_space = true;
2709 
2710 	/*
2711 	 * This is similar to page_mkwrite, we need to reserve the space before
2712 	 * we take the page lock.
2713 	 */
2714 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2715 					   PAGE_SIZE);
2716 again:
2717 	lock_page(page);
2718 
2719 	/*
2720 	 * Before we queued this fixup, we took a reference on the page.
2721 	 * page->mapping may go NULL, but it shouldn't be moved to a different
2722 	 * address space.
2723 	 */
2724 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2725 		/*
2726 		 * Unfortunately this is a little tricky, either
2727 		 *
2728 		 * 1) We got here and our page had already been dealt with and
2729 		 *    we reserved our space, thus ret == 0, so we need to just
2730 		 *    drop our space reservation and bail.  This can happen the
2731 		 *    first time we come into the fixup worker, or could happen
2732 		 *    while waiting for the ordered extent.
2733 		 * 2) Our page was already dealt with, but we happened to get an
2734 		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2735 		 *    this case we obviously don't have anything to release, but
2736 		 *    because the page was already dealt with we don't want to
2737 		 *    mark the page with an error, so make sure we're resetting
2738 		 *    ret to 0.  This is why we have this check _before_ the ret
2739 		 *    check, because we do not want to have a surprise ENOSPC
2740 		 *    when the page was already properly dealt with.
2741 		 */
2742 		if (!ret) {
2743 			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2744 			btrfs_delalloc_release_space(inode, data_reserved,
2745 						     page_start, PAGE_SIZE,
2746 						     true);
2747 		}
2748 		ret = 0;
2749 		goto out_page;
2750 	}
2751 
2752 	/*
2753 	 * We can't mess with the page state unless it is locked, so now that
2754 	 * it is locked bail if we failed to make our space reservation.
2755 	 */
2756 	if (ret)
2757 		goto out_page;
2758 
2759 	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2760 
2761 	/* already ordered? We're done */
2762 	if (PageOrdered(page))
2763 		goto out_reserved;
2764 
2765 	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2766 	if (ordered) {
2767 		unlock_extent(&inode->io_tree, page_start, page_end,
2768 			      &cached_state);
2769 		unlock_page(page);
2770 		btrfs_start_ordered_extent(ordered);
2771 		btrfs_put_ordered_extent(ordered);
2772 		goto again;
2773 	}
2774 
2775 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2776 					&cached_state);
2777 	if (ret)
2778 		goto out_reserved;
2779 
2780 	/*
2781 	 * Everything went as planned, we're now the owner of a dirty page with
2782 	 * delayed allocation bits set and space reserved for our COW
2783 	 * destination.
2784 	 *
2785 	 * The page was dirty when we started, nothing should have cleaned it.
2786 	 */
2787 	BUG_ON(!PageDirty(page));
2788 	free_delalloc_space = false;
2789 out_reserved:
2790 	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2791 	if (free_delalloc_space)
2792 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2793 					     PAGE_SIZE, true);
2794 	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2795 out_page:
2796 	if (ret) {
2797 		/*
2798 		 * We hit ENOSPC or other errors.  Update the mapping and page
2799 		 * to reflect the errors and clean the page.
2800 		 */
2801 		mapping_set_error(page->mapping, ret);
2802 		btrfs_mark_ordered_io_finished(inode, page, page_start,
2803 					       PAGE_SIZE, !ret);
2804 		clear_page_dirty_for_io(page);
2805 	}
2806 	btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
2807 	unlock_page(page);
2808 	put_page(page);
2809 	kfree(fixup);
2810 	extent_changeset_free(data_reserved);
2811 	/*
2812 	 * As a precaution, do a delayed iput in case it would be the last iput
2813 	 * that could need flushing space. Recursing back to fixup worker would
2814 	 * deadlock.
2815 	 */
2816 	btrfs_add_delayed_iput(inode);
2817 }
2818 
2819 /*
2820  * There are a few paths in the higher layers of the kernel that directly
2821  * set the page dirty bit without asking the filesystem if it is a
2822  * good idea.  This causes problems because we want to make sure COW
2823  * properly happens and the data=ordered rules are followed.
2824  *
2825  * In our case any range that doesn't have the ORDERED bit set
2826  * hasn't been properly setup for IO.  We kick off an async process
2827  * to fix it up.  The async helper will wait for ordered extents, set
2828  * the delalloc bit and make it safe to write the page.
2829  */
2830 int btrfs_writepage_cow_fixup(struct page *page)
2831 {
2832 	struct inode *inode = page->mapping->host;
2833 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2834 	struct btrfs_writepage_fixup *fixup;
2835 
2836 	/* This page has ordered extent covering it already */
2837 	if (PageOrdered(page))
2838 		return 0;
2839 
2840 	/*
2841 	 * PageChecked is set below when we create a fixup worker for this page,
2842 	 * don't try to create another one if we're already PageChecked()
2843 	 *
2844 	 * The extent_io writepage code will redirty the page if we send back
2845 	 * EAGAIN.
2846 	 */
2847 	if (PageChecked(page))
2848 		return -EAGAIN;
2849 
2850 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2851 	if (!fixup)
2852 		return -EAGAIN;
2853 
2854 	/*
2855 	 * We are already holding a reference to this inode from
2856 	 * write_cache_pages.  We need to hold it because the space reservation
2857 	 * takes place outside of the page lock, and we can't trust
2858 	 * page->mapping outside of the page lock.
2859 	 */
2860 	ihold(inode);
2861 	btrfs_folio_set_checked(fs_info, page_folio(page), page_offset(page), PAGE_SIZE);
2862 	get_page(page);
2863 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
2864 	fixup->page = page;
2865 	fixup->inode = BTRFS_I(inode);
2866 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2867 
2868 	return -EAGAIN;
2869 }
2870 
2871 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2872 				       struct btrfs_inode *inode, u64 file_pos,
2873 				       struct btrfs_file_extent_item *stack_fi,
2874 				       const bool update_inode_bytes,
2875 				       u64 qgroup_reserved)
2876 {
2877 	struct btrfs_root *root = inode->root;
2878 	const u64 sectorsize = root->fs_info->sectorsize;
2879 	struct btrfs_path *path;
2880 	struct extent_buffer *leaf;
2881 	struct btrfs_key ins;
2882 	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2883 	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2884 	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
2885 	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2886 	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2887 	struct btrfs_drop_extents_args drop_args = { 0 };
2888 	int ret;
2889 
2890 	path = btrfs_alloc_path();
2891 	if (!path)
2892 		return -ENOMEM;
2893 
2894 	/*
2895 	 * we may be replacing one extent in the tree with another.
2896 	 * The new extent is pinned in the extent map, and we don't want
2897 	 * to drop it from the cache until it is completely in the btree.
2898 	 *
2899 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
2900 	 * the caller is expected to unpin it and allow it to be merged
2901 	 * with the others.
2902 	 */
2903 	drop_args.path = path;
2904 	drop_args.start = file_pos;
2905 	drop_args.end = file_pos + num_bytes;
2906 	drop_args.replace_extent = true;
2907 	drop_args.extent_item_size = sizeof(*stack_fi);
2908 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2909 	if (ret)
2910 		goto out;
2911 
2912 	if (!drop_args.extent_inserted) {
2913 		ins.objectid = btrfs_ino(inode);
2914 		ins.offset = file_pos;
2915 		ins.type = BTRFS_EXTENT_DATA_KEY;
2916 
2917 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
2918 					      sizeof(*stack_fi));
2919 		if (ret)
2920 			goto out;
2921 	}
2922 	leaf = path->nodes[0];
2923 	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
2924 	write_extent_buffer(leaf, stack_fi,
2925 			btrfs_item_ptr_offset(leaf, path->slots[0]),
2926 			sizeof(struct btrfs_file_extent_item));
2927 
2928 	btrfs_mark_buffer_dirty(trans, leaf);
2929 	btrfs_release_path(path);
2930 
2931 	/*
2932 	 * If we dropped an inline extent here, we know the range where it is
2933 	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
2934 	 * number of bytes only for that range containing the inline extent.
2935 	 * The remaining of the range will be processed when clearning the
2936 	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
2937 	 */
2938 	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
2939 		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
2940 
2941 		inline_size = drop_args.bytes_found - inline_size;
2942 		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
2943 		drop_args.bytes_found -= inline_size;
2944 		num_bytes -= sectorsize;
2945 	}
2946 
2947 	if (update_inode_bytes)
2948 		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
2949 
2950 	ins.objectid = disk_bytenr;
2951 	ins.offset = disk_num_bytes;
2952 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2953 
2954 	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
2955 	if (ret)
2956 		goto out;
2957 
2958 	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
2959 					       file_pos - offset,
2960 					       qgroup_reserved, &ins);
2961 out:
2962 	btrfs_free_path(path);
2963 
2964 	return ret;
2965 }
2966 
2967 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2968 					 u64 start, u64 len)
2969 {
2970 	struct btrfs_block_group *cache;
2971 
2972 	cache = btrfs_lookup_block_group(fs_info, start);
2973 	ASSERT(cache);
2974 
2975 	spin_lock(&cache->lock);
2976 	cache->delalloc_bytes -= len;
2977 	spin_unlock(&cache->lock);
2978 
2979 	btrfs_put_block_group(cache);
2980 }
2981 
2982 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
2983 					     struct btrfs_ordered_extent *oe)
2984 {
2985 	struct btrfs_file_extent_item stack_fi;
2986 	bool update_inode_bytes;
2987 	u64 num_bytes = oe->num_bytes;
2988 	u64 ram_bytes = oe->ram_bytes;
2989 
2990 	memset(&stack_fi, 0, sizeof(stack_fi));
2991 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
2992 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
2993 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
2994 						   oe->disk_num_bytes);
2995 	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
2996 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
2997 		num_bytes = oe->truncated_len;
2998 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
2999 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3000 	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3001 	/* Encryption and other encoding is reserved and all 0 */
3002 
3003 	/*
3004 	 * For delalloc, when completing an ordered extent we update the inode's
3005 	 * bytes when clearing the range in the inode's io tree, so pass false
3006 	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3007 	 * except if the ordered extent was truncated.
3008 	 */
3009 	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3010 			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3011 			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3012 
3013 	return insert_reserved_file_extent(trans, oe->inode,
3014 					   oe->file_offset, &stack_fi,
3015 					   update_inode_bytes, oe->qgroup_rsv);
3016 }
3017 
3018 /*
3019  * As ordered data IO finishes, this gets called so we can finish
3020  * an ordered extent if the range of bytes in the file it covers are
3021  * fully written.
3022  */
3023 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3024 {
3025 	struct btrfs_inode *inode = ordered_extent->inode;
3026 	struct btrfs_root *root = inode->root;
3027 	struct btrfs_fs_info *fs_info = root->fs_info;
3028 	struct btrfs_trans_handle *trans = NULL;
3029 	struct extent_io_tree *io_tree = &inode->io_tree;
3030 	struct extent_state *cached_state = NULL;
3031 	u64 start, end;
3032 	int compress_type = 0;
3033 	int ret = 0;
3034 	u64 logical_len = ordered_extent->num_bytes;
3035 	bool freespace_inode;
3036 	bool truncated = false;
3037 	bool clear_reserved_extent = true;
3038 	unsigned int clear_bits = EXTENT_DEFRAG;
3039 
3040 	start = ordered_extent->file_offset;
3041 	end = start + ordered_extent->num_bytes - 1;
3042 
3043 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3044 	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3045 	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3046 	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3047 		clear_bits |= EXTENT_DELALLOC_NEW;
3048 
3049 	freespace_inode = btrfs_is_free_space_inode(inode);
3050 	if (!freespace_inode)
3051 		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3052 
3053 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3054 		ret = -EIO;
3055 		goto out;
3056 	}
3057 
3058 	if (btrfs_is_zoned(fs_info))
3059 		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3060 					ordered_extent->disk_num_bytes);
3061 
3062 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3063 		truncated = true;
3064 		logical_len = ordered_extent->truncated_len;
3065 		/* Truncated the entire extent, don't bother adding */
3066 		if (!logical_len)
3067 			goto out;
3068 	}
3069 
3070 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3071 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3072 
3073 		btrfs_inode_safe_disk_i_size_write(inode, 0);
3074 		if (freespace_inode)
3075 			trans = btrfs_join_transaction_spacecache(root);
3076 		else
3077 			trans = btrfs_join_transaction(root);
3078 		if (IS_ERR(trans)) {
3079 			ret = PTR_ERR(trans);
3080 			trans = NULL;
3081 			goto out;
3082 		}
3083 		trans->block_rsv = &inode->block_rsv;
3084 		ret = btrfs_update_inode_fallback(trans, inode);
3085 		if (ret) /* -ENOMEM or corruption */
3086 			btrfs_abort_transaction(trans, ret);
3087 		goto out;
3088 	}
3089 
3090 	clear_bits |= EXTENT_LOCKED;
3091 	lock_extent(io_tree, start, end, &cached_state);
3092 
3093 	if (freespace_inode)
3094 		trans = btrfs_join_transaction_spacecache(root);
3095 	else
3096 		trans = btrfs_join_transaction(root);
3097 	if (IS_ERR(trans)) {
3098 		ret = PTR_ERR(trans);
3099 		trans = NULL;
3100 		goto out;
3101 	}
3102 
3103 	trans->block_rsv = &inode->block_rsv;
3104 
3105 	ret = btrfs_insert_raid_extent(trans, ordered_extent);
3106 	if (ret)
3107 		goto out;
3108 
3109 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3110 		compress_type = ordered_extent->compress_type;
3111 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3112 		BUG_ON(compress_type);
3113 		ret = btrfs_mark_extent_written(trans, inode,
3114 						ordered_extent->file_offset,
3115 						ordered_extent->file_offset +
3116 						logical_len);
3117 		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3118 						  ordered_extent->disk_num_bytes);
3119 	} else {
3120 		BUG_ON(root == fs_info->tree_root);
3121 		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3122 		if (!ret) {
3123 			clear_reserved_extent = false;
3124 			btrfs_release_delalloc_bytes(fs_info,
3125 						ordered_extent->disk_bytenr,
3126 						ordered_extent->disk_num_bytes);
3127 		}
3128 	}
3129 	if (ret < 0) {
3130 		btrfs_abort_transaction(trans, ret);
3131 		goto out;
3132 	}
3133 
3134 	ret = unpin_extent_cache(inode, ordered_extent->file_offset,
3135 				 ordered_extent->num_bytes, trans->transid);
3136 	if (ret < 0) {
3137 		btrfs_abort_transaction(trans, ret);
3138 		goto out;
3139 	}
3140 
3141 	ret = add_pending_csums(trans, &ordered_extent->list);
3142 	if (ret) {
3143 		btrfs_abort_transaction(trans, ret);
3144 		goto out;
3145 	}
3146 
3147 	/*
3148 	 * If this is a new delalloc range, clear its new delalloc flag to
3149 	 * update the inode's number of bytes. This needs to be done first
3150 	 * before updating the inode item.
3151 	 */
3152 	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3153 	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3154 		clear_extent_bit(&inode->io_tree, start, end,
3155 				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3156 				 &cached_state);
3157 
3158 	btrfs_inode_safe_disk_i_size_write(inode, 0);
3159 	ret = btrfs_update_inode_fallback(trans, inode);
3160 	if (ret) { /* -ENOMEM or corruption */
3161 		btrfs_abort_transaction(trans, ret);
3162 		goto out;
3163 	}
3164 out:
3165 	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3166 			 &cached_state);
3167 
3168 	if (trans)
3169 		btrfs_end_transaction(trans);
3170 
3171 	if (ret || truncated) {
3172 		u64 unwritten_start = start;
3173 
3174 		/*
3175 		 * If we failed to finish this ordered extent for any reason we
3176 		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3177 		 * extent, and mark the inode with the error if it wasn't
3178 		 * already set.  Any error during writeback would have already
3179 		 * set the mapping error, so we need to set it if we're the ones
3180 		 * marking this ordered extent as failed.
3181 		 */
3182 		if (ret)
3183 			btrfs_mark_ordered_extent_error(ordered_extent);
3184 
3185 		if (truncated)
3186 			unwritten_start += logical_len;
3187 		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3188 
3189 		/*
3190 		 * Drop extent maps for the part of the extent we didn't write.
3191 		 *
3192 		 * We have an exception here for the free_space_inode, this is
3193 		 * because when we do btrfs_get_extent() on the free space inode
3194 		 * we will search the commit root.  If this is a new block group
3195 		 * we won't find anything, and we will trip over the assert in
3196 		 * writepage where we do ASSERT(em->block_start !=
3197 		 * EXTENT_MAP_HOLE).
3198 		 *
3199 		 * Theoretically we could also skip this for any NOCOW extent as
3200 		 * we don't mess with the extent map tree in the NOCOW case, but
3201 		 * for now simply skip this if we are the free space inode.
3202 		 */
3203 		if (!btrfs_is_free_space_inode(inode))
3204 			btrfs_drop_extent_map_range(inode, unwritten_start,
3205 						    end, false);
3206 
3207 		/*
3208 		 * If the ordered extent had an IOERR or something else went
3209 		 * wrong we need to return the space for this ordered extent
3210 		 * back to the allocator.  We only free the extent in the
3211 		 * truncated case if we didn't write out the extent at all.
3212 		 *
3213 		 * If we made it past insert_reserved_file_extent before we
3214 		 * errored out then we don't need to do this as the accounting
3215 		 * has already been done.
3216 		 */
3217 		if ((ret || !logical_len) &&
3218 		    clear_reserved_extent &&
3219 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3220 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3221 			/*
3222 			 * Discard the range before returning it back to the
3223 			 * free space pool
3224 			 */
3225 			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3226 				btrfs_discard_extent(fs_info,
3227 						ordered_extent->disk_bytenr,
3228 						ordered_extent->disk_num_bytes,
3229 						NULL);
3230 			btrfs_free_reserved_extent(fs_info,
3231 					ordered_extent->disk_bytenr,
3232 					ordered_extent->disk_num_bytes, 1);
3233 			/*
3234 			 * Actually free the qgroup rsv which was released when
3235 			 * the ordered extent was created.
3236 			 */
3237 			btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
3238 						  ordered_extent->qgroup_rsv,
3239 						  BTRFS_QGROUP_RSV_DATA);
3240 		}
3241 	}
3242 
3243 	/*
3244 	 * This needs to be done to make sure anybody waiting knows we are done
3245 	 * updating everything for this ordered extent.
3246 	 */
3247 	btrfs_remove_ordered_extent(inode, ordered_extent);
3248 
3249 	/* once for us */
3250 	btrfs_put_ordered_extent(ordered_extent);
3251 	/* once for the tree */
3252 	btrfs_put_ordered_extent(ordered_extent);
3253 
3254 	return ret;
3255 }
3256 
3257 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3258 {
3259 	if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
3260 	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3261 	    list_empty(&ordered->bioc_list))
3262 		btrfs_finish_ordered_zoned(ordered);
3263 	return btrfs_finish_one_ordered(ordered);
3264 }
3265 
3266 /*
3267  * Verify the checksum for a single sector without any extra action that depend
3268  * on the type of I/O.
3269  */
3270 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
3271 			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
3272 {
3273 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3274 	char *kaddr;
3275 
3276 	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);
3277 
3278 	shash->tfm = fs_info->csum_shash;
3279 
3280 	kaddr = kmap_local_page(page) + pgoff;
3281 	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
3282 	kunmap_local(kaddr);
3283 
3284 	if (memcmp(csum, csum_expected, fs_info->csum_size))
3285 		return -EIO;
3286 	return 0;
3287 }
3288 
3289 /*
3290  * Verify the checksum of a single data sector.
3291  *
3292  * @bbio:	btrfs_io_bio which contains the csum
3293  * @dev:	device the sector is on
3294  * @bio_offset:	offset to the beginning of the bio (in bytes)
3295  * @bv:		bio_vec to check
3296  *
3297  * Check if the checksum on a data block is valid.  When a checksum mismatch is
3298  * detected, report the error and fill the corrupted range with zero.
3299  *
3300  * Return %true if the sector is ok or had no checksum to start with, else %false.
3301  */
3302 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
3303 			u32 bio_offset, struct bio_vec *bv)
3304 {
3305 	struct btrfs_inode *inode = bbio->inode;
3306 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3307 	u64 file_offset = bbio->file_offset + bio_offset;
3308 	u64 end = file_offset + bv->bv_len - 1;
3309 	u8 *csum_expected;
3310 	u8 csum[BTRFS_CSUM_SIZE];
3311 
3312 	ASSERT(bv->bv_len == fs_info->sectorsize);
3313 
3314 	if (!bbio->csum)
3315 		return true;
3316 
3317 	if (btrfs_is_data_reloc_root(inode->root) &&
3318 	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3319 			   NULL)) {
3320 		/* Skip the range without csum for data reloc inode */
3321 		clear_extent_bits(&inode->io_tree, file_offset, end,
3322 				  EXTENT_NODATASUM);
3323 		return true;
3324 	}
3325 
3326 	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
3327 				fs_info->csum_size;
3328 	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
3329 				    csum_expected))
3330 		goto zeroit;
3331 	return true;
3332 
3333 zeroit:
3334 	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
3335 				    bbio->mirror_num);
3336 	if (dev)
3337 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
3338 	memzero_bvec(bv);
3339 	return false;
3340 }
3341 
3342 /*
3343  * Perform a delayed iput on @inode.
3344  *
3345  * @inode: The inode we want to perform iput on
3346  *
3347  * This function uses the generic vfs_inode::i_count to track whether we should
3348  * just decrement it (in case it's > 1) or if this is the last iput then link
3349  * the inode to the delayed iput machinery. Delayed iputs are processed at
3350  * transaction commit time/superblock commit/cleaner kthread.
3351  */
3352 void btrfs_add_delayed_iput(struct btrfs_inode *inode)
3353 {
3354 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3355 	unsigned long flags;
3356 
3357 	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
3358 		return;
3359 
3360 	atomic_inc(&fs_info->nr_delayed_iputs);
3361 	/*
3362 	 * Need to be irq safe here because we can be called from either an irq
3363 	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
3364 	 * context.
3365 	 */
3366 	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
3367 	ASSERT(list_empty(&inode->delayed_iput));
3368 	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
3369 	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
3370 	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3371 		wake_up_process(fs_info->cleaner_kthread);
3372 }
3373 
3374 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3375 				    struct btrfs_inode *inode)
3376 {
3377 	list_del_init(&inode->delayed_iput);
3378 	spin_unlock_irq(&fs_info->delayed_iput_lock);
3379 	iput(&inode->vfs_inode);
3380 	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3381 		wake_up(&fs_info->delayed_iputs_wait);
3382 	spin_lock_irq(&fs_info->delayed_iput_lock);
3383 }
3384 
3385 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3386 				   struct btrfs_inode *inode)
3387 {
3388 	if (!list_empty(&inode->delayed_iput)) {
3389 		spin_lock_irq(&fs_info->delayed_iput_lock);
3390 		if (!list_empty(&inode->delayed_iput))
3391 			run_delayed_iput_locked(fs_info, inode);
3392 		spin_unlock_irq(&fs_info->delayed_iput_lock);
3393 	}
3394 }
3395 
3396 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3397 {
3398 	/*
3399 	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
3400 	 * calls btrfs_add_delayed_iput() and that needs to lock
3401 	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
3402 	 * prevent a deadlock.
3403 	 */
3404 	spin_lock_irq(&fs_info->delayed_iput_lock);
3405 	while (!list_empty(&fs_info->delayed_iputs)) {
3406 		struct btrfs_inode *inode;
3407 
3408 		inode = list_first_entry(&fs_info->delayed_iputs,
3409 				struct btrfs_inode, delayed_iput);
3410 		run_delayed_iput_locked(fs_info, inode);
3411 		if (need_resched()) {
3412 			spin_unlock_irq(&fs_info->delayed_iput_lock);
3413 			cond_resched();
3414 			spin_lock_irq(&fs_info->delayed_iput_lock);
3415 		}
3416 	}
3417 	spin_unlock_irq(&fs_info->delayed_iput_lock);
3418 }
3419 
3420 /*
3421  * Wait for flushing all delayed iputs
3422  *
3423  * @fs_info:  the filesystem
3424  *
3425  * This will wait on any delayed iputs that are currently running with KILLABLE
3426  * set.  Once they are all done running we will return, unless we are killed in
3427  * which case we return EINTR. This helps in user operations like fallocate etc
3428  * that might get blocked on the iputs.
3429  *
3430  * Return EINTR if we were killed, 0 if nothing's pending
3431  */
3432 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3433 {
3434 	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3435 			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3436 	if (ret)
3437 		return -EINTR;
3438 	return 0;
3439 }
3440 
3441 /*
3442  * This creates an orphan entry for the given inode in case something goes wrong
3443  * in the middle of an unlink.
3444  */
3445 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3446 		     struct btrfs_inode *inode)
3447 {
3448 	int ret;
3449 
3450 	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3451 	if (ret && ret != -EEXIST) {
3452 		btrfs_abort_transaction(trans, ret);
3453 		return ret;
3454 	}
3455 
3456 	return 0;
3457 }
3458 
3459 /*
3460  * We have done the delete so we can go ahead and remove the orphan item for
3461  * this particular inode.
3462  */
3463 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3464 			    struct btrfs_inode *inode)
3465 {
3466 	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3467 }
3468 
3469 /*
3470  * this cleans up any orphans that may be left on the list from the last use
3471  * of this root.
3472  */
3473 int btrfs_orphan_cleanup(struct btrfs_root *root)
3474 {
3475 	struct btrfs_fs_info *fs_info = root->fs_info;
3476 	struct btrfs_path *path;
3477 	struct extent_buffer *leaf;
3478 	struct btrfs_key key, found_key;
3479 	struct btrfs_trans_handle *trans;
3480 	struct inode *inode;
3481 	u64 last_objectid = 0;
3482 	int ret = 0, nr_unlink = 0;
3483 
3484 	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3485 		return 0;
3486 
3487 	path = btrfs_alloc_path();
3488 	if (!path) {
3489 		ret = -ENOMEM;
3490 		goto out;
3491 	}
3492 	path->reada = READA_BACK;
3493 
3494 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3495 	key.type = BTRFS_ORPHAN_ITEM_KEY;
3496 	key.offset = (u64)-1;
3497 
3498 	while (1) {
3499 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3500 		if (ret < 0)
3501 			goto out;
3502 
3503 		/*
3504 		 * if ret == 0 means we found what we were searching for, which
3505 		 * is weird, but possible, so only screw with path if we didn't
3506 		 * find the key and see if we have stuff that matches
3507 		 */
3508 		if (ret > 0) {
3509 			ret = 0;
3510 			if (path->slots[0] == 0)
3511 				break;
3512 			path->slots[0]--;
3513 		}
3514 
3515 		/* pull out the item */
3516 		leaf = path->nodes[0];
3517 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3518 
3519 		/* make sure the item matches what we want */
3520 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3521 			break;
3522 		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3523 			break;
3524 
3525 		/* release the path since we're done with it */
3526 		btrfs_release_path(path);
3527 
3528 		/*
3529 		 * this is where we are basically btrfs_lookup, without the
3530 		 * crossing root thing.  we store the inode number in the
3531 		 * offset of the orphan item.
3532 		 */
3533 
3534 		if (found_key.offset == last_objectid) {
3535 			/*
3536 			 * We found the same inode as before. This means we were
3537 			 * not able to remove its items via eviction triggered
3538 			 * by an iput(). A transaction abort may have happened,
3539 			 * due to -ENOSPC for example, so try to grab the error
3540 			 * that lead to a transaction abort, if any.
3541 			 */
3542 			btrfs_err(fs_info,
3543 				  "Error removing orphan entry, stopping orphan cleanup");
3544 			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
3545 			goto out;
3546 		}
3547 
3548 		last_objectid = found_key.offset;
3549 
3550 		found_key.objectid = found_key.offset;
3551 		found_key.type = BTRFS_INODE_ITEM_KEY;
3552 		found_key.offset = 0;
3553 		inode = btrfs_iget(last_objectid, root);
3554 		if (IS_ERR(inode)) {
3555 			ret = PTR_ERR(inode);
3556 			inode = NULL;
3557 			if (ret != -ENOENT)
3558 				goto out;
3559 		}
3560 
3561 		if (!inode && root == fs_info->tree_root) {
3562 			struct btrfs_root *dead_root;
3563 			int is_dead_root = 0;
3564 
3565 			/*
3566 			 * This is an orphan in the tree root. Currently these
3567 			 * could come from 2 sources:
3568 			 *  a) a root (snapshot/subvolume) deletion in progress
3569 			 *  b) a free space cache inode
3570 			 * We need to distinguish those two, as the orphan item
3571 			 * for a root must not get deleted before the deletion
3572 			 * of the snapshot/subvolume's tree completes.
3573 			 *
3574 			 * btrfs_find_orphan_roots() ran before us, which has
3575 			 * found all deleted roots and loaded them into
3576 			 * fs_info->fs_roots_radix. So here we can find if an
3577 			 * orphan item corresponds to a deleted root by looking
3578 			 * up the root from that radix tree.
3579 			 */
3580 
3581 			spin_lock(&fs_info->fs_roots_radix_lock);
3582 			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3583 							 (unsigned long)found_key.objectid);
3584 			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3585 				is_dead_root = 1;
3586 			spin_unlock(&fs_info->fs_roots_radix_lock);
3587 
3588 			if (is_dead_root) {
3589 				/* prevent this orphan from being found again */
3590 				key.offset = found_key.objectid - 1;
3591 				continue;
3592 			}
3593 
3594 		}
3595 
3596 		/*
3597 		 * If we have an inode with links, there are a couple of
3598 		 * possibilities:
3599 		 *
3600 		 * 1. We were halfway through creating fsverity metadata for the
3601 		 * file. In that case, the orphan item represents incomplete
3602 		 * fsverity metadata which must be cleaned up with
3603 		 * btrfs_drop_verity_items and deleting the orphan item.
3604 
3605 		 * 2. Old kernels (before v3.12) used to create an
3606 		 * orphan item for truncate indicating that there were possibly
3607 		 * extent items past i_size that needed to be deleted. In v3.12,
3608 		 * truncate was changed to update i_size in sync with the extent
3609 		 * items, but the (useless) orphan item was still created. Since
3610 		 * v4.18, we don't create the orphan item for truncate at all.
3611 		 *
3612 		 * So, this item could mean that we need to do a truncate, but
3613 		 * only if this filesystem was last used on a pre-v3.12 kernel
3614 		 * and was not cleanly unmounted. The odds of that are quite
3615 		 * slim, and it's a pain to do the truncate now, so just delete
3616 		 * the orphan item.
3617 		 *
3618 		 * It's also possible that this orphan item was supposed to be
3619 		 * deleted but wasn't. The inode number may have been reused,
3620 		 * but either way, we can delete the orphan item.
3621 		 */
3622 		if (!inode || inode->i_nlink) {
3623 			if (inode) {
3624 				ret = btrfs_drop_verity_items(BTRFS_I(inode));
3625 				iput(inode);
3626 				inode = NULL;
3627 				if (ret)
3628 					goto out;
3629 			}
3630 			trans = btrfs_start_transaction(root, 1);
3631 			if (IS_ERR(trans)) {
3632 				ret = PTR_ERR(trans);
3633 				goto out;
3634 			}
3635 			btrfs_debug(fs_info, "auto deleting %Lu",
3636 				    found_key.objectid);
3637 			ret = btrfs_del_orphan_item(trans, root,
3638 						    found_key.objectid);
3639 			btrfs_end_transaction(trans);
3640 			if (ret)
3641 				goto out;
3642 			continue;
3643 		}
3644 
3645 		nr_unlink++;
3646 
3647 		/* this will do delete_inode and everything for us */
3648 		iput(inode);
3649 	}
3650 	/* release the path since we're done with it */
3651 	btrfs_release_path(path);
3652 
3653 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3654 		trans = btrfs_join_transaction(root);
3655 		if (!IS_ERR(trans))
3656 			btrfs_end_transaction(trans);
3657 	}
3658 
3659 	if (nr_unlink)
3660 		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3661 
3662 out:
3663 	if (ret)
3664 		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3665 	btrfs_free_path(path);
3666 	return ret;
3667 }
3668 
3669 /*
3670  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3671  * don't find any xattrs, we know there can't be any acls.
3672  *
3673  * slot is the slot the inode is in, objectid is the objectid of the inode
3674  */
3675 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3676 					  int slot, u64 objectid,
3677 					  int *first_xattr_slot)
3678 {
3679 	u32 nritems = btrfs_header_nritems(leaf);
3680 	struct btrfs_key found_key;
3681 	static u64 xattr_access = 0;
3682 	static u64 xattr_default = 0;
3683 	int scanned = 0;
3684 
3685 	if (!xattr_access) {
3686 		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3687 					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3688 		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3689 					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3690 	}
3691 
3692 	slot++;
3693 	*first_xattr_slot = -1;
3694 	while (slot < nritems) {
3695 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3696 
3697 		/* we found a different objectid, there must not be acls */
3698 		if (found_key.objectid != objectid)
3699 			return 0;
3700 
3701 		/* we found an xattr, assume we've got an acl */
3702 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3703 			if (*first_xattr_slot == -1)
3704 				*first_xattr_slot = slot;
3705 			if (found_key.offset == xattr_access ||
3706 			    found_key.offset == xattr_default)
3707 				return 1;
3708 		}
3709 
3710 		/*
3711 		 * we found a key greater than an xattr key, there can't
3712 		 * be any acls later on
3713 		 */
3714 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3715 			return 0;
3716 
3717 		slot++;
3718 		scanned++;
3719 
3720 		/*
3721 		 * it goes inode, inode backrefs, xattrs, extents,
3722 		 * so if there are a ton of hard links to an inode there can
3723 		 * be a lot of backrefs.  Don't waste time searching too hard,
3724 		 * this is just an optimization
3725 		 */
3726 		if (scanned >= 8)
3727 			break;
3728 	}
3729 	/* we hit the end of the leaf before we found an xattr or
3730 	 * something larger than an xattr.  We have to assume the inode
3731 	 * has acls
3732 	 */
3733 	if (*first_xattr_slot == -1)
3734 		*first_xattr_slot = slot;
3735 	return 1;
3736 }
3737 
3738 static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
3739 {
3740 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3741 
3742 	if (WARN_ON_ONCE(inode->file_extent_tree))
3743 		return 0;
3744 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
3745 		return 0;
3746 	if (!S_ISREG(inode->vfs_inode.i_mode))
3747 		return 0;
3748 	if (btrfs_is_free_space_inode(inode))
3749 		return 0;
3750 
3751 	inode->file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
3752 	if (!inode->file_extent_tree)
3753 		return -ENOMEM;
3754 
3755 	extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT);
3756 	/* Lockdep class is set only for the file extent tree. */
3757 	lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
3758 
3759 	return 0;
3760 }
3761 
3762 /*
3763  * read an inode from the btree into the in-memory inode
3764  */
3765 static int btrfs_read_locked_inode(struct inode *inode,
3766 				   struct btrfs_path *in_path)
3767 {
3768 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
3769 	struct btrfs_path *path = in_path;
3770 	struct extent_buffer *leaf;
3771 	struct btrfs_inode_item *inode_item;
3772 	struct btrfs_root *root = BTRFS_I(inode)->root;
3773 	struct btrfs_key location;
3774 	unsigned long ptr;
3775 	int maybe_acls;
3776 	u32 rdev;
3777 	int ret;
3778 	bool filled = false;
3779 	int first_xattr_slot;
3780 
3781 	ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
3782 	if (ret)
3783 		return ret;
3784 
3785 	ret = btrfs_fill_inode(inode, &rdev);
3786 	if (!ret)
3787 		filled = true;
3788 
3789 	if (!path) {
3790 		path = btrfs_alloc_path();
3791 		if (!path)
3792 			return -ENOMEM;
3793 	}
3794 
3795 	btrfs_get_inode_key(BTRFS_I(inode), &location);
3796 
3797 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3798 	if (ret) {
3799 		if (path != in_path)
3800 			btrfs_free_path(path);
3801 		return ret;
3802 	}
3803 
3804 	leaf = path->nodes[0];
3805 
3806 	if (filled)
3807 		goto cache_index;
3808 
3809 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3810 				    struct btrfs_inode_item);
3811 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3812 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3813 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3814 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3815 	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3816 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
3817 			round_up(i_size_read(inode), fs_info->sectorsize));
3818 
3819 	inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
3820 			btrfs_timespec_nsec(leaf, &inode_item->atime));
3821 
3822 	inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
3823 			btrfs_timespec_nsec(leaf, &inode_item->mtime));
3824 
3825 	inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
3826 			btrfs_timespec_nsec(leaf, &inode_item->ctime));
3827 
3828 	BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
3829 	BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
3830 
3831 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3832 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3833 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3834 
3835 	inode_set_iversion_queried(inode,
3836 				   btrfs_inode_sequence(leaf, inode_item));
3837 	inode->i_generation = BTRFS_I(inode)->generation;
3838 	inode->i_rdev = 0;
3839 	rdev = btrfs_inode_rdev(leaf, inode_item);
3840 
3841 	if (S_ISDIR(inode->i_mode))
3842 		BTRFS_I(inode)->index_cnt = (u64)-1;
3843 
3844 	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
3845 				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
3846 
3847 cache_index:
3848 	/*
3849 	 * If we were modified in the current generation and evicted from memory
3850 	 * and then re-read we need to do a full sync since we don't have any
3851 	 * idea about which extents were modified before we were evicted from
3852 	 * cache.
3853 	 *
3854 	 * This is required for both inode re-read from disk and delayed inode
3855 	 * in the delayed_nodes xarray.
3856 	 */
3857 	if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
3858 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3859 			&BTRFS_I(inode)->runtime_flags);
3860 
3861 	/*
3862 	 * We don't persist the id of the transaction where an unlink operation
3863 	 * against the inode was last made. So here we assume the inode might
3864 	 * have been evicted, and therefore the exact value of last_unlink_trans
3865 	 * lost, and set it to last_trans to avoid metadata inconsistencies
3866 	 * between the inode and its parent if the inode is fsync'ed and the log
3867 	 * replayed. For example, in the scenario:
3868 	 *
3869 	 * touch mydir/foo
3870 	 * ln mydir/foo mydir/bar
3871 	 * sync
3872 	 * unlink mydir/bar
3873 	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3874 	 * xfs_io -c fsync mydir/foo
3875 	 * <power failure>
3876 	 * mount fs, triggers fsync log replay
3877 	 *
3878 	 * We must make sure that when we fsync our inode foo we also log its
3879 	 * parent inode, otherwise after log replay the parent still has the
3880 	 * dentry with the "bar" name but our inode foo has a link count of 1
3881 	 * and doesn't have an inode ref with the name "bar" anymore.
3882 	 *
3883 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3884 	 * but it guarantees correctness at the expense of occasional full
3885 	 * transaction commits on fsync if our inode is a directory, or if our
3886 	 * inode is not a directory, logging its parent unnecessarily.
3887 	 */
3888 	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3889 
3890 	/*
3891 	 * Same logic as for last_unlink_trans. We don't persist the generation
3892 	 * of the last transaction where this inode was used for a reflink
3893 	 * operation, so after eviction and reloading the inode we must be
3894 	 * pessimistic and assume the last transaction that modified the inode.
3895 	 */
3896 	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
3897 
3898 	path->slots[0]++;
3899 	if (inode->i_nlink != 1 ||
3900 	    path->slots[0] >= btrfs_header_nritems(leaf))
3901 		goto cache_acl;
3902 
3903 	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3904 	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3905 		goto cache_acl;
3906 
3907 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3908 	if (location.type == BTRFS_INODE_REF_KEY) {
3909 		struct btrfs_inode_ref *ref;
3910 
3911 		ref = (struct btrfs_inode_ref *)ptr;
3912 		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3913 	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3914 		struct btrfs_inode_extref *extref;
3915 
3916 		extref = (struct btrfs_inode_extref *)ptr;
3917 		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3918 								     extref);
3919 	}
3920 cache_acl:
3921 	/*
3922 	 * try to precache a NULL acl entry for files that don't have
3923 	 * any xattrs or acls
3924 	 */
3925 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3926 			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3927 	if (first_xattr_slot != -1) {
3928 		path->slots[0] = first_xattr_slot;
3929 		ret = btrfs_load_inode_props(inode, path);
3930 		if (ret)
3931 			btrfs_err(fs_info,
3932 				  "error loading props for ino %llu (root %llu): %d",
3933 				  btrfs_ino(BTRFS_I(inode)),
3934 				  btrfs_root_id(root), ret);
3935 	}
3936 	if (path != in_path)
3937 		btrfs_free_path(path);
3938 
3939 	if (!maybe_acls)
3940 		cache_no_acl(inode);
3941 
3942 	switch (inode->i_mode & S_IFMT) {
3943 	case S_IFREG:
3944 		inode->i_mapping->a_ops = &btrfs_aops;
3945 		inode->i_fop = &btrfs_file_operations;
3946 		inode->i_op = &btrfs_file_inode_operations;
3947 		break;
3948 	case S_IFDIR:
3949 		inode->i_fop = &btrfs_dir_file_operations;
3950 		inode->i_op = &btrfs_dir_inode_operations;
3951 		break;
3952 	case S_IFLNK:
3953 		inode->i_op = &btrfs_symlink_inode_operations;
3954 		inode_nohighmem(inode);
3955 		inode->i_mapping->a_ops = &btrfs_aops;
3956 		break;
3957 	default:
3958 		inode->i_op = &btrfs_special_inode_operations;
3959 		init_special_inode(inode, inode->i_mode, rdev);
3960 		break;
3961 	}
3962 
3963 	btrfs_sync_inode_flags_to_i_flags(inode);
3964 	return 0;
3965 }
3966 
3967 /*
3968  * given a leaf and an inode, copy the inode fields into the leaf
3969  */
3970 static void fill_inode_item(struct btrfs_trans_handle *trans,
3971 			    struct extent_buffer *leaf,
3972 			    struct btrfs_inode_item *item,
3973 			    struct inode *inode)
3974 {
3975 	struct btrfs_map_token token;
3976 	u64 flags;
3977 
3978 	btrfs_init_map_token(&token, leaf);
3979 
3980 	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
3981 	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
3982 	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
3983 	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
3984 	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
3985 
3986 	btrfs_set_token_timespec_sec(&token, &item->atime,
3987 				     inode_get_atime_sec(inode));
3988 	btrfs_set_token_timespec_nsec(&token, &item->atime,
3989 				      inode_get_atime_nsec(inode));
3990 
3991 	btrfs_set_token_timespec_sec(&token, &item->mtime,
3992 				     inode_get_mtime_sec(inode));
3993 	btrfs_set_token_timespec_nsec(&token, &item->mtime,
3994 				      inode_get_mtime_nsec(inode));
3995 
3996 	btrfs_set_token_timespec_sec(&token, &item->ctime,
3997 				     inode_get_ctime_sec(inode));
3998 	btrfs_set_token_timespec_nsec(&token, &item->ctime,
3999 				      inode_get_ctime_nsec(inode));
4000 
4001 	btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
4002 	btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
4003 
4004 	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
4005 	btrfs_set_token_inode_generation(&token, item,
4006 					 BTRFS_I(inode)->generation);
4007 	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4008 	btrfs_set_token_inode_transid(&token, item, trans->transid);
4009 	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4010 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4011 					  BTRFS_I(inode)->ro_flags);
4012 	btrfs_set_token_inode_flags(&token, item, flags);
4013 	btrfs_set_token_inode_block_group(&token, item, 0);
4014 }
4015 
4016 /*
4017  * copy everything in the in-memory inode into the btree.
4018  */
4019 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4020 					    struct btrfs_inode *inode)
4021 {
4022 	struct btrfs_inode_item *inode_item;
4023 	struct btrfs_path *path;
4024 	struct extent_buffer *leaf;
4025 	struct btrfs_key key;
4026 	int ret;
4027 
4028 	path = btrfs_alloc_path();
4029 	if (!path)
4030 		return -ENOMEM;
4031 
4032 	btrfs_get_inode_key(inode, &key);
4033 	ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
4034 	if (ret) {
4035 		if (ret > 0)
4036 			ret = -ENOENT;
4037 		goto failed;
4038 	}
4039 
4040 	leaf = path->nodes[0];
4041 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4042 				    struct btrfs_inode_item);
4043 
4044 	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
4045 	btrfs_mark_buffer_dirty(trans, leaf);
4046 	btrfs_set_inode_last_trans(trans, inode);
4047 	ret = 0;
4048 failed:
4049 	btrfs_free_path(path);
4050 	return ret;
4051 }
4052 
4053 /*
4054  * copy everything in the in-memory inode into the btree.
4055  */
4056 int btrfs_update_inode(struct btrfs_trans_handle *trans,
4057 		       struct btrfs_inode *inode)
4058 {
4059 	struct btrfs_root *root = inode->root;
4060 	struct btrfs_fs_info *fs_info = root->fs_info;
4061 	int ret;
4062 
4063 	/*
4064 	 * If the inode is a free space inode, we can deadlock during commit
4065 	 * if we put it into the delayed code.
4066 	 *
4067 	 * The data relocation inode should also be directly updated
4068 	 * without delay
4069 	 */
4070 	if (!btrfs_is_free_space_inode(inode)
4071 	    && !btrfs_is_data_reloc_root(root)
4072 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4073 		btrfs_update_root_times(trans, root);
4074 
4075 		ret = btrfs_delayed_update_inode(trans, inode);
4076 		if (!ret)
4077 			btrfs_set_inode_last_trans(trans, inode);
4078 		return ret;
4079 	}
4080 
4081 	return btrfs_update_inode_item(trans, inode);
4082 }
4083 
4084 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4085 				struct btrfs_inode *inode)
4086 {
4087 	int ret;
4088 
4089 	ret = btrfs_update_inode(trans, inode);
4090 	if (ret == -ENOSPC)
4091 		return btrfs_update_inode_item(trans, inode);
4092 	return ret;
4093 }
4094 
4095 /*
4096  * unlink helper that gets used here in inode.c and in the tree logging
4097  * recovery code.  It remove a link in a directory with a given name, and
4098  * also drops the back refs in the inode to the directory
4099  */
4100 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4101 				struct btrfs_inode *dir,
4102 				struct btrfs_inode *inode,
4103 				const struct fscrypt_str *name,
4104 				struct btrfs_rename_ctx *rename_ctx)
4105 {
4106 	struct btrfs_root *root = dir->root;
4107 	struct btrfs_fs_info *fs_info = root->fs_info;
4108 	struct btrfs_path *path;
4109 	int ret = 0;
4110 	struct btrfs_dir_item *di;
4111 	u64 index;
4112 	u64 ino = btrfs_ino(inode);
4113 	u64 dir_ino = btrfs_ino(dir);
4114 
4115 	path = btrfs_alloc_path();
4116 	if (!path) {
4117 		ret = -ENOMEM;
4118 		goto out;
4119 	}
4120 
4121 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
4122 	if (IS_ERR_OR_NULL(di)) {
4123 		ret = di ? PTR_ERR(di) : -ENOENT;
4124 		goto err;
4125 	}
4126 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4127 	if (ret)
4128 		goto err;
4129 	btrfs_release_path(path);
4130 
4131 	/*
4132 	 * If we don't have dir index, we have to get it by looking up
4133 	 * the inode ref, since we get the inode ref, remove it directly,
4134 	 * it is unnecessary to do delayed deletion.
4135 	 *
4136 	 * But if we have dir index, needn't search inode ref to get it.
4137 	 * Since the inode ref is close to the inode item, it is better
4138 	 * that we delay to delete it, and just do this deletion when
4139 	 * we update the inode item.
4140 	 */
4141 	if (inode->dir_index) {
4142 		ret = btrfs_delayed_delete_inode_ref(inode);
4143 		if (!ret) {
4144 			index = inode->dir_index;
4145 			goto skip_backref;
4146 		}
4147 	}
4148 
4149 	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
4150 	if (ret) {
4151 		btrfs_info(fs_info,
4152 			"failed to delete reference to %.*s, inode %llu parent %llu",
4153 			name->len, name->name, ino, dir_ino);
4154 		btrfs_abort_transaction(trans, ret);
4155 		goto err;
4156 	}
4157 skip_backref:
4158 	if (rename_ctx)
4159 		rename_ctx->index = index;
4160 
4161 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4162 	if (ret) {
4163 		btrfs_abort_transaction(trans, ret);
4164 		goto err;
4165 	}
4166 
4167 	/*
4168 	 * If we are in a rename context, we don't need to update anything in the
4169 	 * log. That will be done later during the rename by btrfs_log_new_name().
4170 	 * Besides that, doing it here would only cause extra unnecessary btree
4171 	 * operations on the log tree, increasing latency for applications.
4172 	 */
4173 	if (!rename_ctx) {
4174 		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
4175 		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
4176 	}
4177 
4178 	/*
4179 	 * If we have a pending delayed iput we could end up with the final iput
4180 	 * being run in btrfs-cleaner context.  If we have enough of these built
4181 	 * up we can end up burning a lot of time in btrfs-cleaner without any
4182 	 * way to throttle the unlinks.  Since we're currently holding a ref on
4183 	 * the inode we can run the delayed iput here without any issues as the
4184 	 * final iput won't be done until after we drop the ref we're currently
4185 	 * holding.
4186 	 */
4187 	btrfs_run_delayed_iput(fs_info, inode);
4188 err:
4189 	btrfs_free_path(path);
4190 	if (ret)
4191 		goto out;
4192 
4193 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
4194 	inode_inc_iversion(&inode->vfs_inode);
4195 	inode_inc_iversion(&dir->vfs_inode);
4196  	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4197 	ret = btrfs_update_inode(trans, dir);
4198 out:
4199 	return ret;
4200 }
4201 
4202 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4203 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
4204 		       const struct fscrypt_str *name)
4205 {
4206 	int ret;
4207 
4208 	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
4209 	if (!ret) {
4210 		drop_nlink(&inode->vfs_inode);
4211 		ret = btrfs_update_inode(trans, inode);
4212 	}
4213 	return ret;
4214 }
4215 
4216 /*
4217  * helper to start transaction for unlink and rmdir.
4218  *
4219  * unlink and rmdir are special in btrfs, they do not always free space, so
4220  * if we cannot make our reservations the normal way try and see if there is
4221  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4222  * allow the unlink to occur.
4223  */
4224 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
4225 {
4226 	struct btrfs_root *root = dir->root;
4227 
4228 	return btrfs_start_transaction_fallback_global_rsv(root,
4229 						   BTRFS_UNLINK_METADATA_UNITS);
4230 }
4231 
4232 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4233 {
4234 	struct btrfs_trans_handle *trans;
4235 	struct inode *inode = d_inode(dentry);
4236 	int ret;
4237 	struct fscrypt_name fname;
4238 
4239 	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4240 	if (ret)
4241 		return ret;
4242 
4243 	/* This needs to handle no-key deletions later on */
4244 
4245 	trans = __unlink_start_trans(BTRFS_I(dir));
4246 	if (IS_ERR(trans)) {
4247 		ret = PTR_ERR(trans);
4248 		goto fscrypt_free;
4249 	}
4250 
4251 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4252 				false);
4253 
4254 	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4255 				 &fname.disk_name);
4256 	if (ret)
4257 		goto end_trans;
4258 
4259 	if (inode->i_nlink == 0) {
4260 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4261 		if (ret)
4262 			goto end_trans;
4263 	}
4264 
4265 end_trans:
4266 	btrfs_end_transaction(trans);
4267 	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4268 fscrypt_free:
4269 	fscrypt_free_filename(&fname);
4270 	return ret;
4271 }
4272 
4273 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4274 			       struct btrfs_inode *dir, struct dentry *dentry)
4275 {
4276 	struct btrfs_root *root = dir->root;
4277 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4278 	struct btrfs_path *path;
4279 	struct extent_buffer *leaf;
4280 	struct btrfs_dir_item *di;
4281 	struct btrfs_key key;
4282 	u64 index;
4283 	int ret;
4284 	u64 objectid;
4285 	u64 dir_ino = btrfs_ino(dir);
4286 	struct fscrypt_name fname;
4287 
4288 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
4289 	if (ret)
4290 		return ret;
4291 
4292 	/* This needs to handle no-key deletions later on */
4293 
4294 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4295 		objectid = btrfs_root_id(inode->root);
4296 	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4297 		objectid = inode->ref_root_id;
4298 	} else {
4299 		WARN_ON(1);
4300 		fscrypt_free_filename(&fname);
4301 		return -EINVAL;
4302 	}
4303 
4304 	path = btrfs_alloc_path();
4305 	if (!path) {
4306 		ret = -ENOMEM;
4307 		goto out;
4308 	}
4309 
4310 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4311 				   &fname.disk_name, -1);
4312 	if (IS_ERR_OR_NULL(di)) {
4313 		ret = di ? PTR_ERR(di) : -ENOENT;
4314 		goto out;
4315 	}
4316 
4317 	leaf = path->nodes[0];
4318 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4319 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4320 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4321 	if (ret) {
4322 		btrfs_abort_transaction(trans, ret);
4323 		goto out;
4324 	}
4325 	btrfs_release_path(path);
4326 
4327 	/*
4328 	 * This is a placeholder inode for a subvolume we didn't have a
4329 	 * reference to at the time of the snapshot creation.  In the meantime
4330 	 * we could have renamed the real subvol link into our snapshot, so
4331 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4332 	 * Instead simply lookup the dir_index_item for this entry so we can
4333 	 * remove it.  Otherwise we know we have a ref to the root and we can
4334 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4335 	 */
4336 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4337 		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
4338 		if (IS_ERR_OR_NULL(di)) {
4339 			if (!di)
4340 				ret = -ENOENT;
4341 			else
4342 				ret = PTR_ERR(di);
4343 			btrfs_abort_transaction(trans, ret);
4344 			goto out;
4345 		}
4346 
4347 		leaf = path->nodes[0];
4348 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4349 		index = key.offset;
4350 		btrfs_release_path(path);
4351 	} else {
4352 		ret = btrfs_del_root_ref(trans, objectid,
4353 					 btrfs_root_id(root), dir_ino,
4354 					 &index, &fname.disk_name);
4355 		if (ret) {
4356 			btrfs_abort_transaction(trans, ret);
4357 			goto out;
4358 		}
4359 	}
4360 
4361 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4362 	if (ret) {
4363 		btrfs_abort_transaction(trans, ret);
4364 		goto out;
4365 	}
4366 
4367 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
4368 	inode_inc_iversion(&dir->vfs_inode);
4369 	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4370 	ret = btrfs_update_inode_fallback(trans, dir);
4371 	if (ret)
4372 		btrfs_abort_transaction(trans, ret);
4373 out:
4374 	btrfs_free_path(path);
4375 	fscrypt_free_filename(&fname);
4376 	return ret;
4377 }
4378 
4379 /*
4380  * Helper to check if the subvolume references other subvolumes or if it's
4381  * default.
4382  */
4383 static noinline int may_destroy_subvol(struct btrfs_root *root)
4384 {
4385 	struct btrfs_fs_info *fs_info = root->fs_info;
4386 	struct btrfs_path *path;
4387 	struct btrfs_dir_item *di;
4388 	struct btrfs_key key;
4389 	struct fscrypt_str name = FSTR_INIT("default", 7);
4390 	u64 dir_id;
4391 	int ret;
4392 
4393 	path = btrfs_alloc_path();
4394 	if (!path)
4395 		return -ENOMEM;
4396 
4397 	/* Make sure this root isn't set as the default subvol */
4398 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4399 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4400 				   dir_id, &name, 0);
4401 	if (di && !IS_ERR(di)) {
4402 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4403 		if (key.objectid == btrfs_root_id(root)) {
4404 			ret = -EPERM;
4405 			btrfs_err(fs_info,
4406 				  "deleting default subvolume %llu is not allowed",
4407 				  key.objectid);
4408 			goto out;
4409 		}
4410 		btrfs_release_path(path);
4411 	}
4412 
4413 	key.objectid = btrfs_root_id(root);
4414 	key.type = BTRFS_ROOT_REF_KEY;
4415 	key.offset = (u64)-1;
4416 
4417 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4418 	if (ret < 0)
4419 		goto out;
4420 	if (ret == 0) {
4421 		/*
4422 		 * Key with offset -1 found, there would have to exist a root
4423 		 * with such id, but this is out of valid range.
4424 		 */
4425 		ret = -EUCLEAN;
4426 		goto out;
4427 	}
4428 
4429 	ret = 0;
4430 	if (path->slots[0] > 0) {
4431 		path->slots[0]--;
4432 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4433 		if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
4434 			ret = -ENOTEMPTY;
4435 	}
4436 out:
4437 	btrfs_free_path(path);
4438 	return ret;
4439 }
4440 
4441 /* Delete all dentries for inodes belonging to the root */
4442 static void btrfs_prune_dentries(struct btrfs_root *root)
4443 {
4444 	struct btrfs_fs_info *fs_info = root->fs_info;
4445 	struct btrfs_inode *inode;
4446 	u64 min_ino = 0;
4447 
4448 	if (!BTRFS_FS_ERROR(fs_info))
4449 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4450 
4451 	inode = btrfs_find_first_inode(root, min_ino);
4452 	while (inode) {
4453 		if (atomic_read(&inode->vfs_inode.i_count) > 1)
4454 			d_prune_aliases(&inode->vfs_inode);
4455 
4456 		min_ino = btrfs_ino(inode) + 1;
4457 		/*
4458 		 * btrfs_drop_inode() will have it removed from the inode
4459 		 * cache when its usage count hits zero.
4460 		 */
4461 		iput(&inode->vfs_inode);
4462 		cond_resched();
4463 		inode = btrfs_find_first_inode(root, min_ino);
4464 	}
4465 }
4466 
4467 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
4468 {
4469 	struct btrfs_root *root = dir->root;
4470 	struct btrfs_fs_info *fs_info = root->fs_info;
4471 	struct inode *inode = d_inode(dentry);
4472 	struct btrfs_root *dest = BTRFS_I(inode)->root;
4473 	struct btrfs_trans_handle *trans;
4474 	struct btrfs_block_rsv block_rsv;
4475 	u64 root_flags;
4476 	u64 qgroup_reserved = 0;
4477 	int ret;
4478 
4479 	down_write(&fs_info->subvol_sem);
4480 
4481 	/*
4482 	 * Don't allow to delete a subvolume with send in progress. This is
4483 	 * inside the inode lock so the error handling that has to drop the bit
4484 	 * again is not run concurrently.
4485 	 */
4486 	spin_lock(&dest->root_item_lock);
4487 	if (dest->send_in_progress) {
4488 		spin_unlock(&dest->root_item_lock);
4489 		btrfs_warn(fs_info,
4490 			   "attempt to delete subvolume %llu during send",
4491 			   btrfs_root_id(dest));
4492 		ret = -EPERM;
4493 		goto out_up_write;
4494 	}
4495 	if (atomic_read(&dest->nr_swapfiles)) {
4496 		spin_unlock(&dest->root_item_lock);
4497 		btrfs_warn(fs_info,
4498 			   "attempt to delete subvolume %llu with active swapfile",
4499 			   btrfs_root_id(root));
4500 		ret = -EPERM;
4501 		goto out_up_write;
4502 	}
4503 	root_flags = btrfs_root_flags(&dest->root_item);
4504 	btrfs_set_root_flags(&dest->root_item,
4505 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4506 	spin_unlock(&dest->root_item_lock);
4507 
4508 	ret = may_destroy_subvol(dest);
4509 	if (ret)
4510 		goto out_undead;
4511 
4512 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4513 	/*
4514 	 * One for dir inode,
4515 	 * two for dir entries,
4516 	 * two for root ref/backref.
4517 	 */
4518 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4519 	if (ret)
4520 		goto out_undead;
4521 	qgroup_reserved = block_rsv.qgroup_rsv_reserved;
4522 
4523 	trans = btrfs_start_transaction(root, 0);
4524 	if (IS_ERR(trans)) {
4525 		ret = PTR_ERR(trans);
4526 		goto out_release;
4527 	}
4528 	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
4529 	qgroup_reserved = 0;
4530 	trans->block_rsv = &block_rsv;
4531 	trans->bytes_reserved = block_rsv.size;
4532 
4533 	btrfs_record_snapshot_destroy(trans, dir);
4534 
4535 	ret = btrfs_unlink_subvol(trans, dir, dentry);
4536 	if (ret) {
4537 		btrfs_abort_transaction(trans, ret);
4538 		goto out_end_trans;
4539 	}
4540 
4541 	ret = btrfs_record_root_in_trans(trans, dest);
4542 	if (ret) {
4543 		btrfs_abort_transaction(trans, ret);
4544 		goto out_end_trans;
4545 	}
4546 
4547 	memset(&dest->root_item.drop_progress, 0,
4548 		sizeof(dest->root_item.drop_progress));
4549 	btrfs_set_root_drop_level(&dest->root_item, 0);
4550 	btrfs_set_root_refs(&dest->root_item, 0);
4551 
4552 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4553 		ret = btrfs_insert_orphan_item(trans,
4554 					fs_info->tree_root,
4555 					btrfs_root_id(dest));
4556 		if (ret) {
4557 			btrfs_abort_transaction(trans, ret);
4558 			goto out_end_trans;
4559 		}
4560 	}
4561 
4562 	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4563 				     BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
4564 	if (ret && ret != -ENOENT) {
4565 		btrfs_abort_transaction(trans, ret);
4566 		goto out_end_trans;
4567 	}
4568 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4569 		ret = btrfs_uuid_tree_remove(trans,
4570 					  dest->root_item.received_uuid,
4571 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4572 					  btrfs_root_id(dest));
4573 		if (ret && ret != -ENOENT) {
4574 			btrfs_abort_transaction(trans, ret);
4575 			goto out_end_trans;
4576 		}
4577 	}
4578 
4579 	free_anon_bdev(dest->anon_dev);
4580 	dest->anon_dev = 0;
4581 out_end_trans:
4582 	trans->block_rsv = NULL;
4583 	trans->bytes_reserved = 0;
4584 	ret = btrfs_end_transaction(trans);
4585 	inode->i_flags |= S_DEAD;
4586 out_release:
4587 	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
4588 	if (qgroup_reserved)
4589 		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
4590 out_undead:
4591 	if (ret) {
4592 		spin_lock(&dest->root_item_lock);
4593 		root_flags = btrfs_root_flags(&dest->root_item);
4594 		btrfs_set_root_flags(&dest->root_item,
4595 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4596 		spin_unlock(&dest->root_item_lock);
4597 	}
4598 out_up_write:
4599 	up_write(&fs_info->subvol_sem);
4600 	if (!ret) {
4601 		d_invalidate(dentry);
4602 		btrfs_prune_dentries(dest);
4603 		ASSERT(dest->send_in_progress == 0);
4604 	}
4605 
4606 	return ret;
4607 }
4608 
4609 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4610 {
4611 	struct inode *inode = d_inode(dentry);
4612 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
4613 	int ret = 0;
4614 	struct btrfs_trans_handle *trans;
4615 	u64 last_unlink_trans;
4616 	struct fscrypt_name fname;
4617 
4618 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4619 		return -ENOTEMPTY;
4620 	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
4621 		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4622 			btrfs_err(fs_info,
4623 			"extent tree v2 doesn't support snapshot deletion yet");
4624 			return -EOPNOTSUPP;
4625 		}
4626 		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
4627 	}
4628 
4629 	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4630 	if (ret)
4631 		return ret;
4632 
4633 	/* This needs to handle no-key deletions later on */
4634 
4635 	trans = __unlink_start_trans(BTRFS_I(dir));
4636 	if (IS_ERR(trans)) {
4637 		ret = PTR_ERR(trans);
4638 		goto out_notrans;
4639 	}
4640 
4641 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4642 		ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
4643 		goto out;
4644 	}
4645 
4646 	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4647 	if (ret)
4648 		goto out;
4649 
4650 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4651 
4652 	/* now the directory is empty */
4653 	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4654 				 &fname.disk_name);
4655 	if (!ret) {
4656 		btrfs_i_size_write(BTRFS_I(inode), 0);
4657 		/*
4658 		 * Propagate the last_unlink_trans value of the deleted dir to
4659 		 * its parent directory. This is to prevent an unrecoverable
4660 		 * log tree in the case we do something like this:
4661 		 * 1) create dir foo
4662 		 * 2) create snapshot under dir foo
4663 		 * 3) delete the snapshot
4664 		 * 4) rmdir foo
4665 		 * 5) mkdir foo
4666 		 * 6) fsync foo or some file inside foo
4667 		 */
4668 		if (last_unlink_trans >= trans->transid)
4669 			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4670 	}
4671 out:
4672 	btrfs_end_transaction(trans);
4673 out_notrans:
4674 	btrfs_btree_balance_dirty(fs_info);
4675 	fscrypt_free_filename(&fname);
4676 
4677 	return ret;
4678 }
4679 
4680 /*
4681  * Read, zero a chunk and write a block.
4682  *
4683  * @inode - inode that we're zeroing
4684  * @from - the offset to start zeroing
4685  * @len - the length to zero, 0 to zero the entire range respective to the
4686  *	offset
4687  * @front - zero up to the offset instead of from the offset on
4688  *
4689  * This will find the block for the "from" offset and cow the block and zero the
4690  * part we want to zero.  This is used with truncate and hole punching.
4691  */
4692 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4693 			 int front)
4694 {
4695 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4696 	struct address_space *mapping = inode->vfs_inode.i_mapping;
4697 	struct extent_io_tree *io_tree = &inode->io_tree;
4698 	struct btrfs_ordered_extent *ordered;
4699 	struct extent_state *cached_state = NULL;
4700 	struct extent_changeset *data_reserved = NULL;
4701 	bool only_release_metadata = false;
4702 	u32 blocksize = fs_info->sectorsize;
4703 	pgoff_t index = from >> PAGE_SHIFT;
4704 	unsigned offset = from & (blocksize - 1);
4705 	struct folio *folio;
4706 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4707 	size_t write_bytes = blocksize;
4708 	int ret = 0;
4709 	u64 block_start;
4710 	u64 block_end;
4711 
4712 	if (IS_ALIGNED(offset, blocksize) &&
4713 	    (!len || IS_ALIGNED(len, blocksize)))
4714 		goto out;
4715 
4716 	block_start = round_down(from, blocksize);
4717 	block_end = block_start + blocksize - 1;
4718 
4719 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4720 					  blocksize, false);
4721 	if (ret < 0) {
4722 		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
4723 			/* For nocow case, no need to reserve data space */
4724 			only_release_metadata = true;
4725 		} else {
4726 			goto out;
4727 		}
4728 	}
4729 	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
4730 	if (ret < 0) {
4731 		if (!only_release_metadata)
4732 			btrfs_free_reserved_data_space(inode, data_reserved,
4733 						       block_start, blocksize);
4734 		goto out;
4735 	}
4736 again:
4737 	folio = __filemap_get_folio(mapping, index,
4738 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
4739 	if (IS_ERR(folio)) {
4740 		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4741 					     blocksize, true);
4742 		btrfs_delalloc_release_extents(inode, blocksize);
4743 		ret = -ENOMEM;
4744 		goto out;
4745 	}
4746 
4747 	if (!folio_test_uptodate(folio)) {
4748 		ret = btrfs_read_folio(NULL, folio);
4749 		folio_lock(folio);
4750 		if (folio->mapping != mapping) {
4751 			folio_unlock(folio);
4752 			folio_put(folio);
4753 			goto again;
4754 		}
4755 		if (!folio_test_uptodate(folio)) {
4756 			ret = -EIO;
4757 			goto out_unlock;
4758 		}
4759 	}
4760 
4761 	/*
4762 	 * We unlock the page after the io is completed and then re-lock it
4763 	 * above.  release_folio() could have come in between that and cleared
4764 	 * folio private, but left the page in the mapping.  Set the page mapped
4765 	 * here to make sure it's properly set for the subpage stuff.
4766 	 */
4767 	ret = set_folio_extent_mapped(folio);
4768 	if (ret < 0)
4769 		goto out_unlock;
4770 
4771 	folio_wait_writeback(folio);
4772 
4773 	lock_extent(io_tree, block_start, block_end, &cached_state);
4774 
4775 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4776 	if (ordered) {
4777 		unlock_extent(io_tree, block_start, block_end, &cached_state);
4778 		folio_unlock(folio);
4779 		folio_put(folio);
4780 		btrfs_start_ordered_extent(ordered);
4781 		btrfs_put_ordered_extent(ordered);
4782 		goto again;
4783 	}
4784 
4785 	clear_extent_bit(&inode->io_tree, block_start, block_end,
4786 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4787 			 &cached_state);
4788 
4789 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4790 					&cached_state);
4791 	if (ret) {
4792 		unlock_extent(io_tree, block_start, block_end, &cached_state);
4793 		goto out_unlock;
4794 	}
4795 
4796 	if (offset != blocksize) {
4797 		if (!len)
4798 			len = blocksize - offset;
4799 		if (front)
4800 			folio_zero_range(folio, block_start - folio_pos(folio),
4801 					 offset);
4802 		else
4803 			folio_zero_range(folio,
4804 					 (block_start - folio_pos(folio)) + offset,
4805 					 len);
4806 	}
4807 	btrfs_folio_clear_checked(fs_info, folio, block_start,
4808 				  block_end + 1 - block_start);
4809 	btrfs_folio_set_dirty(fs_info, folio, block_start,
4810 			      block_end + 1 - block_start);
4811 	unlock_extent(io_tree, block_start, block_end, &cached_state);
4812 
4813 	if (only_release_metadata)
4814 		set_extent_bit(&inode->io_tree, block_start, block_end,
4815 			       EXTENT_NORESERVE, NULL);
4816 
4817 out_unlock:
4818 	if (ret) {
4819 		if (only_release_metadata)
4820 			btrfs_delalloc_release_metadata(inode, blocksize, true);
4821 		else
4822 			btrfs_delalloc_release_space(inode, data_reserved,
4823 					block_start, blocksize, true);
4824 	}
4825 	btrfs_delalloc_release_extents(inode, blocksize);
4826 	folio_unlock(folio);
4827 	folio_put(folio);
4828 out:
4829 	if (only_release_metadata)
4830 		btrfs_check_nocow_unlock(inode);
4831 	extent_changeset_free(data_reserved);
4832 	return ret;
4833 }
4834 
4835 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
4836 {
4837 	struct btrfs_root *root = inode->root;
4838 	struct btrfs_fs_info *fs_info = root->fs_info;
4839 	struct btrfs_trans_handle *trans;
4840 	struct btrfs_drop_extents_args drop_args = { 0 };
4841 	int ret;
4842 
4843 	/*
4844 	 * If NO_HOLES is enabled, we don't need to do anything.
4845 	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
4846 	 * or btrfs_update_inode() will be called, which guarantee that the next
4847 	 * fsync will know this inode was changed and needs to be logged.
4848 	 */
4849 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
4850 		return 0;
4851 
4852 	/*
4853 	 * 1 - for the one we're dropping
4854 	 * 1 - for the one we're adding
4855 	 * 1 - for updating the inode.
4856 	 */
4857 	trans = btrfs_start_transaction(root, 3);
4858 	if (IS_ERR(trans))
4859 		return PTR_ERR(trans);
4860 
4861 	drop_args.start = offset;
4862 	drop_args.end = offset + len;
4863 	drop_args.drop_cache = true;
4864 
4865 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
4866 	if (ret) {
4867 		btrfs_abort_transaction(trans, ret);
4868 		btrfs_end_transaction(trans);
4869 		return ret;
4870 	}
4871 
4872 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
4873 	if (ret) {
4874 		btrfs_abort_transaction(trans, ret);
4875 	} else {
4876 		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
4877 		btrfs_update_inode(trans, inode);
4878 	}
4879 	btrfs_end_transaction(trans);
4880 	return ret;
4881 }
4882 
4883 /*
4884  * This function puts in dummy file extents for the area we're creating a hole
4885  * for.  So if we are truncating this file to a larger size we need to insert
4886  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
4887  * the range between oldsize and size
4888  */
4889 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
4890 {
4891 	struct btrfs_root *root = inode->root;
4892 	struct btrfs_fs_info *fs_info = root->fs_info;
4893 	struct extent_io_tree *io_tree = &inode->io_tree;
4894 	struct extent_map *em = NULL;
4895 	struct extent_state *cached_state = NULL;
4896 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4897 	u64 block_end = ALIGN(size, fs_info->sectorsize);
4898 	u64 last_byte;
4899 	u64 cur_offset;
4900 	u64 hole_size;
4901 	int ret = 0;
4902 
4903 	/*
4904 	 * If our size started in the middle of a block we need to zero out the
4905 	 * rest of the block before we expand the i_size, otherwise we could
4906 	 * expose stale data.
4907 	 */
4908 	ret = btrfs_truncate_block(inode, oldsize, 0, 0);
4909 	if (ret)
4910 		return ret;
4911 
4912 	if (size <= hole_start)
4913 		return 0;
4914 
4915 	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
4916 					   &cached_state);
4917 	cur_offset = hole_start;
4918 	while (1) {
4919 		em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
4920 		if (IS_ERR(em)) {
4921 			ret = PTR_ERR(em);
4922 			em = NULL;
4923 			break;
4924 		}
4925 		last_byte = min(extent_map_end(em), block_end);
4926 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
4927 		hole_size = last_byte - cur_offset;
4928 
4929 		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
4930 			struct extent_map *hole_em;
4931 
4932 			ret = maybe_insert_hole(inode, cur_offset, hole_size);
4933 			if (ret)
4934 				break;
4935 
4936 			ret = btrfs_inode_set_file_extent_range(inode,
4937 							cur_offset, hole_size);
4938 			if (ret)
4939 				break;
4940 
4941 			hole_em = alloc_extent_map();
4942 			if (!hole_em) {
4943 				btrfs_drop_extent_map_range(inode, cur_offset,
4944 						    cur_offset + hole_size - 1,
4945 						    false);
4946 				btrfs_set_inode_full_sync(inode);
4947 				goto next;
4948 			}
4949 			hole_em->start = cur_offset;
4950 			hole_em->len = hole_size;
4951 
4952 			hole_em->disk_bytenr = EXTENT_MAP_HOLE;
4953 			hole_em->disk_num_bytes = 0;
4954 			hole_em->ram_bytes = hole_size;
4955 			hole_em->generation = btrfs_get_fs_generation(fs_info);
4956 
4957 			ret = btrfs_replace_extent_map_range(inode, hole_em, true);
4958 			free_extent_map(hole_em);
4959 		} else {
4960 			ret = btrfs_inode_set_file_extent_range(inode,
4961 							cur_offset, hole_size);
4962 			if (ret)
4963 				break;
4964 		}
4965 next:
4966 		free_extent_map(em);
4967 		em = NULL;
4968 		cur_offset = last_byte;
4969 		if (cur_offset >= block_end)
4970 			break;
4971 	}
4972 	free_extent_map(em);
4973 	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
4974 	return ret;
4975 }
4976 
4977 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4978 {
4979 	struct btrfs_root *root = BTRFS_I(inode)->root;
4980 	struct btrfs_trans_handle *trans;
4981 	loff_t oldsize = i_size_read(inode);
4982 	loff_t newsize = attr->ia_size;
4983 	int mask = attr->ia_valid;
4984 	int ret;
4985 
4986 	/*
4987 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4988 	 * special case where we need to update the times despite not having
4989 	 * these flags set.  For all other operations the VFS set these flags
4990 	 * explicitly if it wants a timestamp update.
4991 	 */
4992 	if (newsize != oldsize) {
4993 		inode_inc_iversion(inode);
4994 		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
4995 			inode_set_mtime_to_ts(inode,
4996 					      inode_set_ctime_current(inode));
4997 		}
4998 	}
4999 
5000 	if (newsize > oldsize) {
5001 		/*
5002 		 * Don't do an expanding truncate while snapshotting is ongoing.
5003 		 * This is to ensure the snapshot captures a fully consistent
5004 		 * state of this file - if the snapshot captures this expanding
5005 		 * truncation, it must capture all writes that happened before
5006 		 * this truncation.
5007 		 */
5008 		btrfs_drew_write_lock(&root->snapshot_lock);
5009 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5010 		if (ret) {
5011 			btrfs_drew_write_unlock(&root->snapshot_lock);
5012 			return ret;
5013 		}
5014 
5015 		trans = btrfs_start_transaction(root, 1);
5016 		if (IS_ERR(trans)) {
5017 			btrfs_drew_write_unlock(&root->snapshot_lock);
5018 			return PTR_ERR(trans);
5019 		}
5020 
5021 		i_size_write(inode, newsize);
5022 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5023 		pagecache_isize_extended(inode, oldsize, newsize);
5024 		ret = btrfs_update_inode(trans, BTRFS_I(inode));
5025 		btrfs_drew_write_unlock(&root->snapshot_lock);
5026 		btrfs_end_transaction(trans);
5027 	} else {
5028 		struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
5029 
5030 		if (btrfs_is_zoned(fs_info)) {
5031 			ret = btrfs_wait_ordered_range(BTRFS_I(inode),
5032 					ALIGN(newsize, fs_info->sectorsize),
5033 					(u64)-1);
5034 			if (ret)
5035 				return ret;
5036 		}
5037 
5038 		/*
5039 		 * We're truncating a file that used to have good data down to
5040 		 * zero. Make sure any new writes to the file get on disk
5041 		 * on close.
5042 		 */
5043 		if (newsize == 0)
5044 			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5045 				&BTRFS_I(inode)->runtime_flags);
5046 
5047 		truncate_setsize(inode, newsize);
5048 
5049 		inode_dio_wait(inode);
5050 
5051 		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
5052 		if (ret && inode->i_nlink) {
5053 			int err;
5054 
5055 			/*
5056 			 * Truncate failed, so fix up the in-memory size. We
5057 			 * adjusted disk_i_size down as we removed extents, so
5058 			 * wait for disk_i_size to be stable and then update the
5059 			 * in-memory size to match.
5060 			 */
5061 			err = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
5062 			if (err)
5063 				return err;
5064 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5065 		}
5066 	}
5067 
5068 	return ret;
5069 }
5070 
5071 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5072 			 struct iattr *attr)
5073 {
5074 	struct inode *inode = d_inode(dentry);
5075 	struct btrfs_root *root = BTRFS_I(inode)->root;
5076 	int err;
5077 
5078 	if (btrfs_root_readonly(root))
5079 		return -EROFS;
5080 
5081 	err = setattr_prepare(idmap, dentry, attr);
5082 	if (err)
5083 		return err;
5084 
5085 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5086 		err = btrfs_setsize(inode, attr);
5087 		if (err)
5088 			return err;
5089 	}
5090 
5091 	if (attr->ia_valid) {
5092 		setattr_copy(idmap, inode, attr);
5093 		inode_inc_iversion(inode);
5094 		err = btrfs_dirty_inode(BTRFS_I(inode));
5095 
5096 		if (!err && attr->ia_valid & ATTR_MODE)
5097 			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
5098 	}
5099 
5100 	return err;
5101 }
5102 
5103 /*
5104  * While truncating the inode pages during eviction, we get the VFS
5105  * calling btrfs_invalidate_folio() against each folio of the inode. This
5106  * is slow because the calls to btrfs_invalidate_folio() result in a
5107  * huge amount of calls to lock_extent() and clear_extent_bit(),
5108  * which keep merging and splitting extent_state structures over and over,
5109  * wasting lots of time.
5110  *
5111  * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5112  * skip all those expensive operations on a per folio basis and do only
5113  * the ordered io finishing, while we release here the extent_map and
5114  * extent_state structures, without the excessive merging and splitting.
5115  */
5116 static void evict_inode_truncate_pages(struct inode *inode)
5117 {
5118 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5119 	struct rb_node *node;
5120 
5121 	ASSERT(inode->i_state & I_FREEING);
5122 	truncate_inode_pages_final(&inode->i_data);
5123 
5124 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5125 
5126 	/*
5127 	 * Keep looping until we have no more ranges in the io tree.
5128 	 * We can have ongoing bios started by readahead that have
5129 	 * their endio callback (extent_io.c:end_bio_extent_readpage)
5130 	 * still in progress (unlocked the pages in the bio but did not yet
5131 	 * unlocked the ranges in the io tree). Therefore this means some
5132 	 * ranges can still be locked and eviction started because before
5133 	 * submitting those bios, which are executed by a separate task (work
5134 	 * queue kthread), inode references (inode->i_count) were not taken
5135 	 * (which would be dropped in the end io callback of each bio).
5136 	 * Therefore here we effectively end up waiting for those bios and
5137 	 * anyone else holding locked ranges without having bumped the inode's
5138 	 * reference count - if we don't do it, when they access the inode's
5139 	 * io_tree to unlock a range it may be too late, leading to an
5140 	 * use-after-free issue.
5141 	 */
5142 	spin_lock(&io_tree->lock);
5143 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5144 		struct extent_state *state;
5145 		struct extent_state *cached_state = NULL;
5146 		u64 start;
5147 		u64 end;
5148 		unsigned state_flags;
5149 
5150 		node = rb_first(&io_tree->state);
5151 		state = rb_entry(node, struct extent_state, rb_node);
5152 		start = state->start;
5153 		end = state->end;
5154 		state_flags = state->state;
5155 		spin_unlock(&io_tree->lock);
5156 
5157 		lock_extent(io_tree, start, end, &cached_state);
5158 
5159 		/*
5160 		 * If still has DELALLOC flag, the extent didn't reach disk,
5161 		 * and its reserved space won't be freed by delayed_ref.
5162 		 * So we need to free its reserved space here.
5163 		 * (Refer to comment in btrfs_invalidate_folio, case 2)
5164 		 *
5165 		 * Note, end is the bytenr of last byte, so we need + 1 here.
5166 		 */
5167 		if (state_flags & EXTENT_DELALLOC)
5168 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5169 					       end - start + 1, NULL);
5170 
5171 		clear_extent_bit(io_tree, start, end,
5172 				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5173 				 &cached_state);
5174 
5175 		cond_resched();
5176 		spin_lock(&io_tree->lock);
5177 	}
5178 	spin_unlock(&io_tree->lock);
5179 }
5180 
5181 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5182 							struct btrfs_block_rsv *rsv)
5183 {
5184 	struct btrfs_fs_info *fs_info = root->fs_info;
5185 	struct btrfs_trans_handle *trans;
5186 	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
5187 	int ret;
5188 
5189 	/*
5190 	 * Eviction should be taking place at some place safe because of our
5191 	 * delayed iputs.  However the normal flushing code will run delayed
5192 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5193 	 *
5194 	 * We reserve the delayed_refs_extra here again because we can't use
5195 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5196 	 * above.  We reserve our extra bit here because we generate a ton of
5197 	 * delayed refs activity by truncating.
5198 	 *
5199 	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5200 	 * if we fail to make this reservation we can re-try without the
5201 	 * delayed_refs_extra so we can make some forward progress.
5202 	 */
5203 	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5204 				     BTRFS_RESERVE_FLUSH_EVICT);
5205 	if (ret) {
5206 		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5207 					     BTRFS_RESERVE_FLUSH_EVICT);
5208 		if (ret) {
5209 			btrfs_warn(fs_info,
5210 				   "could not allocate space for delete; will truncate on mount");
5211 			return ERR_PTR(-ENOSPC);
5212 		}
5213 		delayed_refs_extra = 0;
5214 	}
5215 
5216 	trans = btrfs_join_transaction(root);
5217 	if (IS_ERR(trans))
5218 		return trans;
5219 
5220 	if (delayed_refs_extra) {
5221 		trans->block_rsv = &fs_info->trans_block_rsv;
5222 		trans->bytes_reserved = delayed_refs_extra;
5223 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5224 					delayed_refs_extra, true);
5225 	}
5226 	return trans;
5227 }
5228 
5229 void btrfs_evict_inode(struct inode *inode)
5230 {
5231 	struct btrfs_fs_info *fs_info;
5232 	struct btrfs_trans_handle *trans;
5233 	struct btrfs_root *root = BTRFS_I(inode)->root;
5234 	struct btrfs_block_rsv *rsv = NULL;
5235 	int ret;
5236 
5237 	trace_btrfs_inode_evict(inode);
5238 
5239 	if (!root) {
5240 		fsverity_cleanup_inode(inode);
5241 		clear_inode(inode);
5242 		return;
5243 	}
5244 
5245 	fs_info = inode_to_fs_info(inode);
5246 	evict_inode_truncate_pages(inode);
5247 
5248 	if (inode->i_nlink &&
5249 	    ((btrfs_root_refs(&root->root_item) != 0 &&
5250 	      btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
5251 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5252 		goto out;
5253 
5254 	if (is_bad_inode(inode))
5255 		goto out;
5256 
5257 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5258 		goto out;
5259 
5260 	if (inode->i_nlink > 0) {
5261 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5262 		       btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
5263 		goto out;
5264 	}
5265 
5266 	/*
5267 	 * This makes sure the inode item in tree is uptodate and the space for
5268 	 * the inode update is released.
5269 	 */
5270 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5271 	if (ret)
5272 		goto out;
5273 
5274 	/*
5275 	 * This drops any pending insert or delete operations we have for this
5276 	 * inode.  We could have a delayed dir index deletion queued up, but
5277 	 * we're removing the inode completely so that'll be taken care of in
5278 	 * the truncate.
5279 	 */
5280 	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5281 
5282 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5283 	if (!rsv)
5284 		goto out;
5285 	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5286 	rsv->failfast = true;
5287 
5288 	btrfs_i_size_write(BTRFS_I(inode), 0);
5289 
5290 	while (1) {
5291 		struct btrfs_truncate_control control = {
5292 			.inode = BTRFS_I(inode),
5293 			.ino = btrfs_ino(BTRFS_I(inode)),
5294 			.new_size = 0,
5295 			.min_type = 0,
5296 		};
5297 
5298 		trans = evict_refill_and_join(root, rsv);
5299 		if (IS_ERR(trans))
5300 			goto out;
5301 
5302 		trans->block_rsv = rsv;
5303 
5304 		ret = btrfs_truncate_inode_items(trans, root, &control);
5305 		trans->block_rsv = &fs_info->trans_block_rsv;
5306 		btrfs_end_transaction(trans);
5307 		/*
5308 		 * We have not added new delayed items for our inode after we
5309 		 * have flushed its delayed items, so no need to throttle on
5310 		 * delayed items. However we have modified extent buffers.
5311 		 */
5312 		btrfs_btree_balance_dirty_nodelay(fs_info);
5313 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5314 			goto out;
5315 		else if (!ret)
5316 			break;
5317 	}
5318 
5319 	/*
5320 	 * Errors here aren't a big deal, it just means we leave orphan items in
5321 	 * the tree. They will be cleaned up on the next mount. If the inode
5322 	 * number gets reused, cleanup deletes the orphan item without doing
5323 	 * anything, and unlink reuses the existing orphan item.
5324 	 *
5325 	 * If it turns out that we are dropping too many of these, we might want
5326 	 * to add a mechanism for retrying these after a commit.
5327 	 */
5328 	trans = evict_refill_and_join(root, rsv);
5329 	if (!IS_ERR(trans)) {
5330 		trans->block_rsv = rsv;
5331 		btrfs_orphan_del(trans, BTRFS_I(inode));
5332 		trans->block_rsv = &fs_info->trans_block_rsv;
5333 		btrfs_end_transaction(trans);
5334 	}
5335 
5336 out:
5337 	btrfs_free_block_rsv(fs_info, rsv);
5338 	/*
5339 	 * If we didn't successfully delete, the orphan item will still be in
5340 	 * the tree and we'll retry on the next mount. Again, we might also want
5341 	 * to retry these periodically in the future.
5342 	 */
5343 	btrfs_remove_delayed_node(BTRFS_I(inode));
5344 	fsverity_cleanup_inode(inode);
5345 	clear_inode(inode);
5346 }
5347 
5348 /*
5349  * Return the key found in the dir entry in the location pointer, fill @type
5350  * with BTRFS_FT_*, and return 0.
5351  *
5352  * If no dir entries were found, returns -ENOENT.
5353  * If found a corrupted location in dir entry, returns -EUCLEAN.
5354  */
5355 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
5356 			       struct btrfs_key *location, u8 *type)
5357 {
5358 	struct btrfs_dir_item *di;
5359 	struct btrfs_path *path;
5360 	struct btrfs_root *root = dir->root;
5361 	int ret = 0;
5362 	struct fscrypt_name fname;
5363 
5364 	path = btrfs_alloc_path();
5365 	if (!path)
5366 		return -ENOMEM;
5367 
5368 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
5369 	if (ret < 0)
5370 		goto out;
5371 	/*
5372 	 * fscrypt_setup_filename() should never return a positive value, but
5373 	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
5374 	 */
5375 	ASSERT(ret == 0);
5376 
5377 	/* This needs to handle no-key deletions later on */
5378 
5379 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
5380 				   &fname.disk_name, 0);
5381 	if (IS_ERR_OR_NULL(di)) {
5382 		ret = di ? PTR_ERR(di) : -ENOENT;
5383 		goto out;
5384 	}
5385 
5386 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5387 	if (location->type != BTRFS_INODE_ITEM_KEY &&
5388 	    location->type != BTRFS_ROOT_ITEM_KEY) {
5389 		ret = -EUCLEAN;
5390 		btrfs_warn(root->fs_info,
5391 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5392 			   __func__, fname.disk_name.name, btrfs_ino(dir),
5393 			   location->objectid, location->type, location->offset);
5394 	}
5395 	if (!ret)
5396 		*type = btrfs_dir_ftype(path->nodes[0], di);
5397 out:
5398 	fscrypt_free_filename(&fname);
5399 	btrfs_free_path(path);
5400 	return ret;
5401 }
5402 
5403 /*
5404  * when we hit a tree root in a directory, the btrfs part of the inode
5405  * needs to be changed to reflect the root directory of the tree root.  This
5406  * is kind of like crossing a mount point.
5407  */
5408 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5409 				    struct btrfs_inode *dir,
5410 				    struct dentry *dentry,
5411 				    struct btrfs_key *location,
5412 				    struct btrfs_root **sub_root)
5413 {
5414 	struct btrfs_path *path;
5415 	struct btrfs_root *new_root;
5416 	struct btrfs_root_ref *ref;
5417 	struct extent_buffer *leaf;
5418 	struct btrfs_key key;
5419 	int ret;
5420 	int err = 0;
5421 	struct fscrypt_name fname;
5422 
5423 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
5424 	if (ret)
5425 		return ret;
5426 
5427 	path = btrfs_alloc_path();
5428 	if (!path) {
5429 		err = -ENOMEM;
5430 		goto out;
5431 	}
5432 
5433 	err = -ENOENT;
5434 	key.objectid = btrfs_root_id(dir->root);
5435 	key.type = BTRFS_ROOT_REF_KEY;
5436 	key.offset = location->objectid;
5437 
5438 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5439 	if (ret) {
5440 		if (ret < 0)
5441 			err = ret;
5442 		goto out;
5443 	}
5444 
5445 	leaf = path->nodes[0];
5446 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5447 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5448 	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
5449 		goto out;
5450 
5451 	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
5452 				   (unsigned long)(ref + 1), fname.disk_name.len);
5453 	if (ret)
5454 		goto out;
5455 
5456 	btrfs_release_path(path);
5457 
5458 	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5459 	if (IS_ERR(new_root)) {
5460 		err = PTR_ERR(new_root);
5461 		goto out;
5462 	}
5463 
5464 	*sub_root = new_root;
5465 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5466 	location->type = BTRFS_INODE_ITEM_KEY;
5467 	location->offset = 0;
5468 	err = 0;
5469 out:
5470 	btrfs_free_path(path);
5471 	fscrypt_free_filename(&fname);
5472 	return err;
5473 }
5474 
5475 static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
5476 {
5477 	struct btrfs_root *root = inode->root;
5478 	struct btrfs_inode *existing;
5479 	const u64 ino = btrfs_ino(inode);
5480 	int ret;
5481 
5482 	if (inode_unhashed(&inode->vfs_inode))
5483 		return 0;
5484 
5485 	if (prealloc) {
5486 		ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
5487 		if (ret)
5488 			return ret;
5489 	}
5490 
5491 	existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
5492 
5493 	if (xa_is_err(existing)) {
5494 		ret = xa_err(existing);
5495 		ASSERT(ret != -EINVAL);
5496 		ASSERT(ret != -ENOMEM);
5497 		return ret;
5498 	} else if (existing) {
5499 		WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING)));
5500 	}
5501 
5502 	return 0;
5503 }
5504 
5505 static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
5506 {
5507 	struct btrfs_root *root = inode->root;
5508 	struct btrfs_inode *entry;
5509 	bool empty = false;
5510 
5511 	xa_lock(&root->inodes);
5512 	entry = __xa_erase(&root->inodes, btrfs_ino(inode));
5513 	if (entry == inode)
5514 		empty = xa_empty(&root->inodes);
5515 	xa_unlock(&root->inodes);
5516 
5517 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5518 		xa_lock(&root->inodes);
5519 		empty = xa_empty(&root->inodes);
5520 		xa_unlock(&root->inodes);
5521 		if (empty)
5522 			btrfs_add_dead_root(root);
5523 	}
5524 }
5525 
5526 
5527 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5528 {
5529 	struct btrfs_iget_args *args = p;
5530 
5531 	btrfs_set_inode_number(BTRFS_I(inode), args->ino);
5532 	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5533 
5534 	if (args->root && args->root == args->root->fs_info->tree_root &&
5535 	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
5536 		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5537 			&BTRFS_I(inode)->runtime_flags);
5538 	return 0;
5539 }
5540 
5541 static int btrfs_find_actor(struct inode *inode, void *opaque)
5542 {
5543 	struct btrfs_iget_args *args = opaque;
5544 
5545 	return args->ino == btrfs_ino(BTRFS_I(inode)) &&
5546 		args->root == BTRFS_I(inode)->root;
5547 }
5548 
5549 static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
5550 {
5551 	struct inode *inode;
5552 	struct btrfs_iget_args args;
5553 	unsigned long hashval = btrfs_inode_hash(ino, root);
5554 
5555 	args.ino = ino;
5556 	args.root = root;
5557 
5558 	inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
5559 			     btrfs_init_locked_inode,
5560 			     (void *)&args);
5561 	return inode;
5562 }
5563 
5564 /*
5565  * Get an inode object given its inode number and corresponding root.
5566  * Path can be preallocated to prevent recursing back to iget through
5567  * allocator. NULL is also valid but may require an additional allocation
5568  * later.
5569  */
5570 struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
5571 			      struct btrfs_path *path)
5572 {
5573 	struct inode *inode;
5574 	int ret;
5575 
5576 	inode = btrfs_iget_locked(ino, root);
5577 	if (!inode)
5578 		return ERR_PTR(-ENOMEM);
5579 
5580 	if (!(inode->i_state & I_NEW))
5581 		return inode;
5582 
5583 	ret = btrfs_read_locked_inode(inode, path);
5584 	/*
5585 	 * ret > 0 can come from btrfs_search_slot called by
5586 	 * btrfs_read_locked_inode(), this means the inode item was not found.
5587 	 */
5588 	if (ret > 0)
5589 		ret = -ENOENT;
5590 	if (ret < 0)
5591 		goto error;
5592 
5593 	ret = btrfs_add_inode_to_root(BTRFS_I(inode), true);
5594 	if (ret < 0)
5595 		goto error;
5596 
5597 	unlock_new_inode(inode);
5598 
5599 	return inode;
5600 error:
5601 	iget_failed(inode);
5602 	return ERR_PTR(ret);
5603 }
5604 
5605 struct inode *btrfs_iget(u64 ino, struct btrfs_root *root)
5606 {
5607 	return btrfs_iget_path(ino, root, NULL);
5608 }
5609 
5610 static struct inode *new_simple_dir(struct inode *dir,
5611 				    struct btrfs_key *key,
5612 				    struct btrfs_root *root)
5613 {
5614 	struct timespec64 ts;
5615 	struct inode *inode = new_inode(dir->i_sb);
5616 
5617 	if (!inode)
5618 		return ERR_PTR(-ENOMEM);
5619 
5620 	BTRFS_I(inode)->root = btrfs_grab_root(root);
5621 	BTRFS_I(inode)->ref_root_id = key->objectid;
5622 	set_bit(BTRFS_INODE_ROOT_STUB, &BTRFS_I(inode)->runtime_flags);
5623 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5624 
5625 	btrfs_set_inode_number(BTRFS_I(inode), BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
5626 	/*
5627 	 * We only need lookup, the rest is read-only and there's no inode
5628 	 * associated with the dentry
5629 	 */
5630 	inode->i_op = &simple_dir_inode_operations;
5631 	inode->i_opflags &= ~IOP_XATTR;
5632 	inode->i_fop = &simple_dir_operations;
5633 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5634 
5635 	ts = inode_set_ctime_current(inode);
5636 	inode_set_mtime_to_ts(inode, ts);
5637 	inode_set_atime_to_ts(inode, inode_get_atime(dir));
5638 	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
5639 	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
5640 
5641 	inode->i_uid = dir->i_uid;
5642 	inode->i_gid = dir->i_gid;
5643 
5644 	return inode;
5645 }
5646 
5647 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
5648 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
5649 static_assert(BTRFS_FT_DIR == FT_DIR);
5650 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
5651 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
5652 static_assert(BTRFS_FT_FIFO == FT_FIFO);
5653 static_assert(BTRFS_FT_SOCK == FT_SOCK);
5654 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
5655 
5656 static inline u8 btrfs_inode_type(struct inode *inode)
5657 {
5658 	return fs_umode_to_ftype(inode->i_mode);
5659 }
5660 
5661 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5662 {
5663 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
5664 	struct inode *inode;
5665 	struct btrfs_root *root = BTRFS_I(dir)->root;
5666 	struct btrfs_root *sub_root = root;
5667 	struct btrfs_key location = { 0 };
5668 	u8 di_type = 0;
5669 	int ret = 0;
5670 
5671 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5672 		return ERR_PTR(-ENAMETOOLONG);
5673 
5674 	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
5675 	if (ret < 0)
5676 		return ERR_PTR(ret);
5677 
5678 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5679 		inode = btrfs_iget(location.objectid, root);
5680 		if (IS_ERR(inode))
5681 			return inode;
5682 
5683 		/* Do extra check against inode mode with di_type */
5684 		if (btrfs_inode_type(inode) != di_type) {
5685 			btrfs_crit(fs_info,
5686 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5687 				  inode->i_mode, btrfs_inode_type(inode),
5688 				  di_type);
5689 			iput(inode);
5690 			return ERR_PTR(-EUCLEAN);
5691 		}
5692 		return inode;
5693 	}
5694 
5695 	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
5696 				       &location, &sub_root);
5697 	if (ret < 0) {
5698 		if (ret != -ENOENT)
5699 			inode = ERR_PTR(ret);
5700 		else
5701 			inode = new_simple_dir(dir, &location, root);
5702 	} else {
5703 		inode = btrfs_iget(location.objectid, sub_root);
5704 		btrfs_put_root(sub_root);
5705 
5706 		if (IS_ERR(inode))
5707 			return inode;
5708 
5709 		down_read(&fs_info->cleanup_work_sem);
5710 		if (!sb_rdonly(inode->i_sb))
5711 			ret = btrfs_orphan_cleanup(sub_root);
5712 		up_read(&fs_info->cleanup_work_sem);
5713 		if (ret) {
5714 			iput(inode);
5715 			inode = ERR_PTR(ret);
5716 		}
5717 	}
5718 
5719 	return inode;
5720 }
5721 
5722 static int btrfs_dentry_delete(const struct dentry *dentry)
5723 {
5724 	struct btrfs_root *root;
5725 	struct inode *inode = d_inode(dentry);
5726 
5727 	if (!inode && !IS_ROOT(dentry))
5728 		inode = d_inode(dentry->d_parent);
5729 
5730 	if (inode) {
5731 		root = BTRFS_I(inode)->root;
5732 		if (btrfs_root_refs(&root->root_item) == 0)
5733 			return 1;
5734 
5735 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5736 			return 1;
5737 	}
5738 	return 0;
5739 }
5740 
5741 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5742 				   unsigned int flags)
5743 {
5744 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5745 
5746 	if (inode == ERR_PTR(-ENOENT))
5747 		inode = NULL;
5748 	return d_splice_alias(inode, dentry);
5749 }
5750 
5751 /*
5752  * Find the highest existing sequence number in a directory and then set the
5753  * in-memory index_cnt variable to the first free sequence number.
5754  */
5755 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
5756 {
5757 	struct btrfs_root *root = inode->root;
5758 	struct btrfs_key key, found_key;
5759 	struct btrfs_path *path;
5760 	struct extent_buffer *leaf;
5761 	int ret;
5762 
5763 	key.objectid = btrfs_ino(inode);
5764 	key.type = BTRFS_DIR_INDEX_KEY;
5765 	key.offset = (u64)-1;
5766 
5767 	path = btrfs_alloc_path();
5768 	if (!path)
5769 		return -ENOMEM;
5770 
5771 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5772 	if (ret < 0)
5773 		goto out;
5774 	/* FIXME: we should be able to handle this */
5775 	if (ret == 0)
5776 		goto out;
5777 	ret = 0;
5778 
5779 	if (path->slots[0] == 0) {
5780 		inode->index_cnt = BTRFS_DIR_START_INDEX;
5781 		goto out;
5782 	}
5783 
5784 	path->slots[0]--;
5785 
5786 	leaf = path->nodes[0];
5787 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5788 
5789 	if (found_key.objectid != btrfs_ino(inode) ||
5790 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
5791 		inode->index_cnt = BTRFS_DIR_START_INDEX;
5792 		goto out;
5793 	}
5794 
5795 	inode->index_cnt = found_key.offset + 1;
5796 out:
5797 	btrfs_free_path(path);
5798 	return ret;
5799 }
5800 
5801 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
5802 {
5803 	int ret = 0;
5804 
5805 	btrfs_inode_lock(dir, 0);
5806 	if (dir->index_cnt == (u64)-1) {
5807 		ret = btrfs_inode_delayed_dir_index_count(dir);
5808 		if (ret) {
5809 			ret = btrfs_set_inode_index_count(dir);
5810 			if (ret)
5811 				goto out;
5812 		}
5813 	}
5814 
5815 	/* index_cnt is the index number of next new entry, so decrement it. */
5816 	*index = dir->index_cnt - 1;
5817 out:
5818 	btrfs_inode_unlock(dir, 0);
5819 
5820 	return ret;
5821 }
5822 
5823 /*
5824  * All this infrastructure exists because dir_emit can fault, and we are holding
5825  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5826  * our information into that, and then dir_emit from the buffer.  This is
5827  * similar to what NFS does, only we don't keep the buffer around in pagecache
5828  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5829  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5830  * tree lock.
5831  */
5832 static int btrfs_opendir(struct inode *inode, struct file *file)
5833 {
5834 	struct btrfs_file_private *private;
5835 	u64 last_index;
5836 	int ret;
5837 
5838 	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
5839 	if (ret)
5840 		return ret;
5841 
5842 	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5843 	if (!private)
5844 		return -ENOMEM;
5845 	private->last_index = last_index;
5846 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5847 	if (!private->filldir_buf) {
5848 		kfree(private);
5849 		return -ENOMEM;
5850 	}
5851 	file->private_data = private;
5852 	return 0;
5853 }
5854 
5855 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
5856 {
5857 	struct btrfs_file_private *private = file->private_data;
5858 	int ret;
5859 
5860 	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
5861 				       &private->last_index);
5862 	if (ret)
5863 		return ret;
5864 
5865 	return generic_file_llseek(file, offset, whence);
5866 }
5867 
5868 struct dir_entry {
5869 	u64 ino;
5870 	u64 offset;
5871 	unsigned type;
5872 	int name_len;
5873 };
5874 
5875 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5876 {
5877 	while (entries--) {
5878 		struct dir_entry *entry = addr;
5879 		char *name = (char *)(entry + 1);
5880 
5881 		ctx->pos = get_unaligned(&entry->offset);
5882 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5883 					 get_unaligned(&entry->ino),
5884 					 get_unaligned(&entry->type)))
5885 			return 1;
5886 		addr += sizeof(struct dir_entry) +
5887 			get_unaligned(&entry->name_len);
5888 		ctx->pos++;
5889 	}
5890 	return 0;
5891 }
5892 
5893 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5894 {
5895 	struct inode *inode = file_inode(file);
5896 	struct btrfs_root *root = BTRFS_I(inode)->root;
5897 	struct btrfs_file_private *private = file->private_data;
5898 	struct btrfs_dir_item *di;
5899 	struct btrfs_key key;
5900 	struct btrfs_key found_key;
5901 	struct btrfs_path *path;
5902 	void *addr;
5903 	LIST_HEAD(ins_list);
5904 	LIST_HEAD(del_list);
5905 	int ret;
5906 	char *name_ptr;
5907 	int name_len;
5908 	int entries = 0;
5909 	int total_len = 0;
5910 	bool put = false;
5911 	struct btrfs_key location;
5912 
5913 	if (!dir_emit_dots(file, ctx))
5914 		return 0;
5915 
5916 	path = btrfs_alloc_path();
5917 	if (!path)
5918 		return -ENOMEM;
5919 
5920 	addr = private->filldir_buf;
5921 	path->reada = READA_FORWARD;
5922 
5923 	put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
5924 					      &ins_list, &del_list);
5925 
5926 again:
5927 	key.type = BTRFS_DIR_INDEX_KEY;
5928 	key.offset = ctx->pos;
5929 	key.objectid = btrfs_ino(BTRFS_I(inode));
5930 
5931 	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
5932 		struct dir_entry *entry;
5933 		struct extent_buffer *leaf = path->nodes[0];
5934 		u8 ftype;
5935 
5936 		if (found_key.objectid != key.objectid)
5937 			break;
5938 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
5939 			break;
5940 		if (found_key.offset < ctx->pos)
5941 			continue;
5942 		if (found_key.offset > private->last_index)
5943 			break;
5944 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5945 			continue;
5946 		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
5947 		name_len = btrfs_dir_name_len(leaf, di);
5948 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
5949 		    PAGE_SIZE) {
5950 			btrfs_release_path(path);
5951 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5952 			if (ret)
5953 				goto nopos;
5954 			addr = private->filldir_buf;
5955 			entries = 0;
5956 			total_len = 0;
5957 			goto again;
5958 		}
5959 
5960 		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
5961 		entry = addr;
5962 		name_ptr = (char *)(entry + 1);
5963 		read_extent_buffer(leaf, name_ptr,
5964 				   (unsigned long)(di + 1), name_len);
5965 		put_unaligned(name_len, &entry->name_len);
5966 		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
5967 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
5968 		put_unaligned(location.objectid, &entry->ino);
5969 		put_unaligned(found_key.offset, &entry->offset);
5970 		entries++;
5971 		addr += sizeof(struct dir_entry) + name_len;
5972 		total_len += sizeof(struct dir_entry) + name_len;
5973 	}
5974 	/* Catch error encountered during iteration */
5975 	if (ret < 0)
5976 		goto err;
5977 
5978 	btrfs_release_path(path);
5979 
5980 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5981 	if (ret)
5982 		goto nopos;
5983 
5984 	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5985 	if (ret)
5986 		goto nopos;
5987 
5988 	/*
5989 	 * Stop new entries from being returned after we return the last
5990 	 * entry.
5991 	 *
5992 	 * New directory entries are assigned a strictly increasing
5993 	 * offset.  This means that new entries created during readdir
5994 	 * are *guaranteed* to be seen in the future by that readdir.
5995 	 * This has broken buggy programs which operate on names as
5996 	 * they're returned by readdir.  Until we re-use freed offsets
5997 	 * we have this hack to stop new entries from being returned
5998 	 * under the assumption that they'll never reach this huge
5999 	 * offset.
6000 	 *
6001 	 * This is being careful not to overflow 32bit loff_t unless the
6002 	 * last entry requires it because doing so has broken 32bit apps
6003 	 * in the past.
6004 	 */
6005 	if (ctx->pos >= INT_MAX)
6006 		ctx->pos = LLONG_MAX;
6007 	else
6008 		ctx->pos = INT_MAX;
6009 nopos:
6010 	ret = 0;
6011 err:
6012 	if (put)
6013 		btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
6014 	btrfs_free_path(path);
6015 	return ret;
6016 }
6017 
6018 /*
6019  * This is somewhat expensive, updating the tree every time the
6020  * inode changes.  But, it is most likely to find the inode in cache.
6021  * FIXME, needs more benchmarking...there are no reasons other than performance
6022  * to keep or drop this code.
6023  */
6024 static int btrfs_dirty_inode(struct btrfs_inode *inode)
6025 {
6026 	struct btrfs_root *root = inode->root;
6027 	struct btrfs_fs_info *fs_info = root->fs_info;
6028 	struct btrfs_trans_handle *trans;
6029 	int ret;
6030 
6031 	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
6032 		return 0;
6033 
6034 	trans = btrfs_join_transaction(root);
6035 	if (IS_ERR(trans))
6036 		return PTR_ERR(trans);
6037 
6038 	ret = btrfs_update_inode(trans, inode);
6039 	if (ret == -ENOSPC || ret == -EDQUOT) {
6040 		/* whoops, lets try again with the full transaction */
6041 		btrfs_end_transaction(trans);
6042 		trans = btrfs_start_transaction(root, 1);
6043 		if (IS_ERR(trans))
6044 			return PTR_ERR(trans);
6045 
6046 		ret = btrfs_update_inode(trans, inode);
6047 	}
6048 	btrfs_end_transaction(trans);
6049 	if (inode->delayed_node)
6050 		btrfs_balance_delayed_items(fs_info);
6051 
6052 	return ret;
6053 }
6054 
6055 /*
6056  * This is a copy of file_update_time.  We need this so we can return error on
6057  * ENOSPC for updating the inode in the case of file write and mmap writes.
6058  */
6059 static int btrfs_update_time(struct inode *inode, int flags)
6060 {
6061 	struct btrfs_root *root = BTRFS_I(inode)->root;
6062 	bool dirty;
6063 
6064 	if (btrfs_root_readonly(root))
6065 		return -EROFS;
6066 
6067 	dirty = inode_update_timestamps(inode, flags);
6068 	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
6069 }
6070 
6071 /*
6072  * helper to find a free sequence number in a given directory.  This current
6073  * code is very simple, later versions will do smarter things in the btree
6074  */
6075 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6076 {
6077 	int ret = 0;
6078 
6079 	if (dir->index_cnt == (u64)-1) {
6080 		ret = btrfs_inode_delayed_dir_index_count(dir);
6081 		if (ret) {
6082 			ret = btrfs_set_inode_index_count(dir);
6083 			if (ret)
6084 				return ret;
6085 		}
6086 	}
6087 
6088 	*index = dir->index_cnt;
6089 	dir->index_cnt++;
6090 
6091 	return ret;
6092 }
6093 
6094 static int btrfs_insert_inode_locked(struct inode *inode)
6095 {
6096 	struct btrfs_iget_args args;
6097 
6098 	args.ino = btrfs_ino(BTRFS_I(inode));
6099 	args.root = BTRFS_I(inode)->root;
6100 
6101 	return insert_inode_locked4(inode,
6102 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6103 		   btrfs_find_actor, &args);
6104 }
6105 
6106 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6107 			    unsigned int *trans_num_items)
6108 {
6109 	struct inode *dir = args->dir;
6110 	struct inode *inode = args->inode;
6111 	int ret;
6112 
6113 	if (!args->orphan) {
6114 		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
6115 					     &args->fname);
6116 		if (ret)
6117 			return ret;
6118 	}
6119 
6120 	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6121 	if (ret) {
6122 		fscrypt_free_filename(&args->fname);
6123 		return ret;
6124 	}
6125 
6126 	/* 1 to add inode item */
6127 	*trans_num_items = 1;
6128 	/* 1 to add compression property */
6129 	if (BTRFS_I(dir)->prop_compress)
6130 		(*trans_num_items)++;
6131 	/* 1 to add default ACL xattr */
6132 	if (args->default_acl)
6133 		(*trans_num_items)++;
6134 	/* 1 to add access ACL xattr */
6135 	if (args->acl)
6136 		(*trans_num_items)++;
6137 #ifdef CONFIG_SECURITY
6138 	/* 1 to add LSM xattr */
6139 	if (dir->i_security)
6140 		(*trans_num_items)++;
6141 #endif
6142 	if (args->orphan) {
6143 		/* 1 to add orphan item */
6144 		(*trans_num_items)++;
6145 	} else {
6146 		/*
6147 		 * 1 to add dir item
6148 		 * 1 to add dir index
6149 		 * 1 to update parent inode item
6150 		 *
6151 		 * No need for 1 unit for the inode ref item because it is
6152 		 * inserted in a batch together with the inode item at
6153 		 * btrfs_create_new_inode().
6154 		 */
6155 		*trans_num_items += 3;
6156 	}
6157 	return 0;
6158 }
6159 
6160 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6161 {
6162 	posix_acl_release(args->acl);
6163 	posix_acl_release(args->default_acl);
6164 	fscrypt_free_filename(&args->fname);
6165 }
6166 
6167 /*
6168  * Inherit flags from the parent inode.
6169  *
6170  * Currently only the compression flags and the cow flags are inherited.
6171  */
6172 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
6173 {
6174 	unsigned int flags;
6175 
6176 	flags = dir->flags;
6177 
6178 	if (flags & BTRFS_INODE_NOCOMPRESS) {
6179 		inode->flags &= ~BTRFS_INODE_COMPRESS;
6180 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
6181 	} else if (flags & BTRFS_INODE_COMPRESS) {
6182 		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
6183 		inode->flags |= BTRFS_INODE_COMPRESS;
6184 	}
6185 
6186 	if (flags & BTRFS_INODE_NODATACOW) {
6187 		inode->flags |= BTRFS_INODE_NODATACOW;
6188 		if (S_ISREG(inode->vfs_inode.i_mode))
6189 			inode->flags |= BTRFS_INODE_NODATASUM;
6190 	}
6191 
6192 	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
6193 }
6194 
6195 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6196 			   struct btrfs_new_inode_args *args)
6197 {
6198 	struct timespec64 ts;
6199 	struct inode *dir = args->dir;
6200 	struct inode *inode = args->inode;
6201 	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6202 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6203 	struct btrfs_root *root;
6204 	struct btrfs_inode_item *inode_item;
6205 	struct btrfs_path *path;
6206 	u64 objectid;
6207 	struct btrfs_inode_ref *ref;
6208 	struct btrfs_key key[2];
6209 	u32 sizes[2];
6210 	struct btrfs_item_batch batch;
6211 	unsigned long ptr;
6212 	int ret;
6213 	bool xa_reserved = false;
6214 
6215 	path = btrfs_alloc_path();
6216 	if (!path)
6217 		return -ENOMEM;
6218 
6219 	if (!args->subvol)
6220 		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6221 	root = BTRFS_I(inode)->root;
6222 
6223 	ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
6224 	if (ret)
6225 		goto out;
6226 
6227 	ret = btrfs_get_free_objectid(root, &objectid);
6228 	if (ret)
6229 		goto out;
6230 	btrfs_set_inode_number(BTRFS_I(inode), objectid);
6231 
6232 	ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
6233 	if (ret)
6234 		goto out;
6235 	xa_reserved = true;
6236 
6237 	if (args->orphan) {
6238 		/*
6239 		 * O_TMPFILE, set link count to 0, so that after this point, we
6240 		 * fill in an inode item with the correct link count.
6241 		 */
6242 		set_nlink(inode, 0);
6243 	} else {
6244 		trace_btrfs_inode_request(dir);
6245 
6246 		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6247 		if (ret)
6248 			goto out;
6249 	}
6250 
6251 	if (S_ISDIR(inode->i_mode))
6252 		BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6253 
6254 	BTRFS_I(inode)->generation = trans->transid;
6255 	inode->i_generation = BTRFS_I(inode)->generation;
6256 
6257 	/*
6258 	 * We don't have any capability xattrs set here yet, shortcut any
6259 	 * queries for the xattrs here.  If we add them later via the inode
6260 	 * security init path or any other path this flag will be cleared.
6261 	 */
6262 	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
6263 
6264 	/*
6265 	 * Subvolumes don't inherit flags from their parent directory.
6266 	 * Originally this was probably by accident, but we probably can't
6267 	 * change it now without compatibility issues.
6268 	 */
6269 	if (!args->subvol)
6270 		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6271 
6272 	if (S_ISREG(inode->i_mode)) {
6273 		if (btrfs_test_opt(fs_info, NODATASUM))
6274 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6275 		if (btrfs_test_opt(fs_info, NODATACOW))
6276 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6277 				BTRFS_INODE_NODATASUM;
6278 	}
6279 
6280 	ret = btrfs_insert_inode_locked(inode);
6281 	if (ret < 0) {
6282 		if (!args->orphan)
6283 			BTRFS_I(dir)->index_cnt--;
6284 		goto out;
6285 	}
6286 
6287 	/*
6288 	 * We could have gotten an inode number from somebody who was fsynced
6289 	 * and then removed in this same transaction, so let's just set full
6290 	 * sync since it will be a full sync anyway and this will blow away the
6291 	 * old info in the log.
6292 	 */
6293 	btrfs_set_inode_full_sync(BTRFS_I(inode));
6294 
6295 	key[0].objectid = objectid;
6296 	key[0].type = BTRFS_INODE_ITEM_KEY;
6297 	key[0].offset = 0;
6298 
6299 	sizes[0] = sizeof(struct btrfs_inode_item);
6300 
6301 	if (!args->orphan) {
6302 		/*
6303 		 * Start new inodes with an inode_ref. This is slightly more
6304 		 * efficient for small numbers of hard links since they will
6305 		 * be packed into one item. Extended refs will kick in if we
6306 		 * add more hard links than can fit in the ref item.
6307 		 */
6308 		key[1].objectid = objectid;
6309 		key[1].type = BTRFS_INODE_REF_KEY;
6310 		if (args->subvol) {
6311 			key[1].offset = objectid;
6312 			sizes[1] = 2 + sizeof(*ref);
6313 		} else {
6314 			key[1].offset = btrfs_ino(BTRFS_I(dir));
6315 			sizes[1] = name->len + sizeof(*ref);
6316 		}
6317 	}
6318 
6319 	batch.keys = &key[0];
6320 	batch.data_sizes = &sizes[0];
6321 	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6322 	batch.nr = args->orphan ? 1 : 2;
6323 	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6324 	if (ret != 0) {
6325 		btrfs_abort_transaction(trans, ret);
6326 		goto discard;
6327 	}
6328 
6329 	ts = simple_inode_init_ts(inode);
6330 	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
6331 	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
6332 
6333 	/*
6334 	 * We're going to fill the inode item now, so at this point the inode
6335 	 * must be fully initialized.
6336 	 */
6337 
6338 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6339 				  struct btrfs_inode_item);
6340 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6341 			     sizeof(*inode_item));
6342 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6343 
6344 	if (!args->orphan) {
6345 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6346 				     struct btrfs_inode_ref);
6347 		ptr = (unsigned long)(ref + 1);
6348 		if (args->subvol) {
6349 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6350 			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6351 			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6352 		} else {
6353 			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6354 						     name->len);
6355 			btrfs_set_inode_ref_index(path->nodes[0], ref,
6356 						  BTRFS_I(inode)->dir_index);
6357 			write_extent_buffer(path->nodes[0], name->name, ptr,
6358 					    name->len);
6359 		}
6360 	}
6361 
6362 	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
6363 	/*
6364 	 * We don't need the path anymore, plus inheriting properties, adding
6365 	 * ACLs, security xattrs, orphan item or adding the link, will result in
6366 	 * allocating yet another path. So just free our path.
6367 	 */
6368 	btrfs_free_path(path);
6369 	path = NULL;
6370 
6371 	if (args->subvol) {
6372 		struct inode *parent;
6373 
6374 		/*
6375 		 * Subvolumes inherit properties from their parent subvolume,
6376 		 * not the directory they were created in.
6377 		 */
6378 		parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
6379 		if (IS_ERR(parent)) {
6380 			ret = PTR_ERR(parent);
6381 		} else {
6382 			ret = btrfs_inode_inherit_props(trans, inode, parent);
6383 			iput(parent);
6384 		}
6385 	} else {
6386 		ret = btrfs_inode_inherit_props(trans, inode, dir);
6387 	}
6388 	if (ret) {
6389 		btrfs_err(fs_info,
6390 			  "error inheriting props for ino %llu (root %llu): %d",
6391 			  btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
6392 	}
6393 
6394 	/*
6395 	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6396 	 * probably a bug.
6397 	 */
6398 	if (!args->subvol) {
6399 		ret = btrfs_init_inode_security(trans, args);
6400 		if (ret) {
6401 			btrfs_abort_transaction(trans, ret);
6402 			goto discard;
6403 		}
6404 	}
6405 
6406 	ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
6407 	if (WARN_ON(ret)) {
6408 		/* Shouldn't happen, we used xa_reserve() before. */
6409 		btrfs_abort_transaction(trans, ret);
6410 		goto discard;
6411 	}
6412 
6413 	trace_btrfs_inode_new(inode);
6414 	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6415 
6416 	btrfs_update_root_times(trans, root);
6417 
6418 	if (args->orphan) {
6419 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6420 	} else {
6421 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6422 				     0, BTRFS_I(inode)->dir_index);
6423 	}
6424 	if (ret) {
6425 		btrfs_abort_transaction(trans, ret);
6426 		goto discard;
6427 	}
6428 
6429 	return 0;
6430 
6431 discard:
6432 	/*
6433 	 * discard_new_inode() calls iput(), but the caller owns the reference
6434 	 * to the inode.
6435 	 */
6436 	ihold(inode);
6437 	discard_new_inode(inode);
6438 out:
6439 	if (xa_reserved)
6440 		xa_release(&root->inodes, objectid);
6441 
6442 	btrfs_free_path(path);
6443 	return ret;
6444 }
6445 
6446 /*
6447  * utility function to add 'inode' into 'parent_inode' with
6448  * a give name and a given sequence number.
6449  * if 'add_backref' is true, also insert a backref from the
6450  * inode to the parent directory.
6451  */
6452 int btrfs_add_link(struct btrfs_trans_handle *trans,
6453 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6454 		   const struct fscrypt_str *name, int add_backref, u64 index)
6455 {
6456 	int ret = 0;
6457 	struct btrfs_key key;
6458 	struct btrfs_root *root = parent_inode->root;
6459 	u64 ino = btrfs_ino(inode);
6460 	u64 parent_ino = btrfs_ino(parent_inode);
6461 
6462 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6463 		memcpy(&key, &inode->root->root_key, sizeof(key));
6464 	} else {
6465 		key.objectid = ino;
6466 		key.type = BTRFS_INODE_ITEM_KEY;
6467 		key.offset = 0;
6468 	}
6469 
6470 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6471 		ret = btrfs_add_root_ref(trans, key.objectid,
6472 					 btrfs_root_id(root), parent_ino,
6473 					 index, name);
6474 	} else if (add_backref) {
6475 		ret = btrfs_insert_inode_ref(trans, root, name,
6476 					     ino, parent_ino, index);
6477 	}
6478 
6479 	/* Nothing to clean up yet */
6480 	if (ret)
6481 		return ret;
6482 
6483 	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
6484 				    btrfs_inode_type(&inode->vfs_inode), index);
6485 	if (ret == -EEXIST || ret == -EOVERFLOW)
6486 		goto fail_dir_item;
6487 	else if (ret) {
6488 		btrfs_abort_transaction(trans, ret);
6489 		return ret;
6490 	}
6491 
6492 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6493 			   name->len * 2);
6494 	inode_inc_iversion(&parent_inode->vfs_inode);
6495 	/*
6496 	 * If we are replaying a log tree, we do not want to update the mtime
6497 	 * and ctime of the parent directory with the current time, since the
6498 	 * log replay procedure is responsible for setting them to their correct
6499 	 * values (the ones it had when the fsync was done).
6500 	 */
6501 	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
6502 		inode_set_mtime_to_ts(&parent_inode->vfs_inode,
6503 				      inode_set_ctime_current(&parent_inode->vfs_inode));
6504 
6505 	ret = btrfs_update_inode(trans, parent_inode);
6506 	if (ret)
6507 		btrfs_abort_transaction(trans, ret);
6508 	return ret;
6509 
6510 fail_dir_item:
6511 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6512 		u64 local_index;
6513 		int err;
6514 		err = btrfs_del_root_ref(trans, key.objectid,
6515 					 btrfs_root_id(root), parent_ino,
6516 					 &local_index, name);
6517 		if (err)
6518 			btrfs_abort_transaction(trans, err);
6519 	} else if (add_backref) {
6520 		u64 local_index;
6521 		int err;
6522 
6523 		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
6524 					  &local_index);
6525 		if (err)
6526 			btrfs_abort_transaction(trans, err);
6527 	}
6528 
6529 	/* Return the original error code */
6530 	return ret;
6531 }
6532 
6533 static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6534 			       struct inode *inode)
6535 {
6536 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6537 	struct btrfs_root *root = BTRFS_I(dir)->root;
6538 	struct btrfs_new_inode_args new_inode_args = {
6539 		.dir = dir,
6540 		.dentry = dentry,
6541 		.inode = inode,
6542 	};
6543 	unsigned int trans_num_items;
6544 	struct btrfs_trans_handle *trans;
6545 	int err;
6546 
6547 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6548 	if (err)
6549 		goto out_inode;
6550 
6551 	trans = btrfs_start_transaction(root, trans_num_items);
6552 	if (IS_ERR(trans)) {
6553 		err = PTR_ERR(trans);
6554 		goto out_new_inode_args;
6555 	}
6556 
6557 	err = btrfs_create_new_inode(trans, &new_inode_args);
6558 	if (!err)
6559 		d_instantiate_new(dentry, inode);
6560 
6561 	btrfs_end_transaction(trans);
6562 	btrfs_btree_balance_dirty(fs_info);
6563 out_new_inode_args:
6564 	btrfs_new_inode_args_destroy(&new_inode_args);
6565 out_inode:
6566 	if (err)
6567 		iput(inode);
6568 	return err;
6569 }
6570 
6571 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
6572 		       struct dentry *dentry, umode_t mode, dev_t rdev)
6573 {
6574 	struct inode *inode;
6575 
6576 	inode = new_inode(dir->i_sb);
6577 	if (!inode)
6578 		return -ENOMEM;
6579 	inode_init_owner(idmap, inode, dir, mode);
6580 	inode->i_op = &btrfs_special_inode_operations;
6581 	init_special_inode(inode, inode->i_mode, rdev);
6582 	return btrfs_create_common(dir, dentry, inode);
6583 }
6584 
6585 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
6586 			struct dentry *dentry, umode_t mode, bool excl)
6587 {
6588 	struct inode *inode;
6589 
6590 	inode = new_inode(dir->i_sb);
6591 	if (!inode)
6592 		return -ENOMEM;
6593 	inode_init_owner(idmap, inode, dir, mode);
6594 	inode->i_fop = &btrfs_file_operations;
6595 	inode->i_op = &btrfs_file_inode_operations;
6596 	inode->i_mapping->a_ops = &btrfs_aops;
6597 	return btrfs_create_common(dir, dentry, inode);
6598 }
6599 
6600 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6601 		      struct dentry *dentry)
6602 {
6603 	struct btrfs_trans_handle *trans = NULL;
6604 	struct btrfs_root *root = BTRFS_I(dir)->root;
6605 	struct inode *inode = d_inode(old_dentry);
6606 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
6607 	struct fscrypt_name fname;
6608 	u64 index;
6609 	int err;
6610 	int drop_inode = 0;
6611 
6612 	/* do not allow sys_link's with other subvols of the same device */
6613 	if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
6614 		return -EXDEV;
6615 
6616 	if (inode->i_nlink >= BTRFS_LINK_MAX)
6617 		return -EMLINK;
6618 
6619 	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
6620 	if (err)
6621 		goto fail;
6622 
6623 	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6624 	if (err)
6625 		goto fail;
6626 
6627 	/*
6628 	 * 2 items for inode and inode ref
6629 	 * 2 items for dir items
6630 	 * 1 item for parent inode
6631 	 * 1 item for orphan item deletion if O_TMPFILE
6632 	 */
6633 	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6634 	if (IS_ERR(trans)) {
6635 		err = PTR_ERR(trans);
6636 		trans = NULL;
6637 		goto fail;
6638 	}
6639 
6640 	/* There are several dir indexes for this inode, clear the cache. */
6641 	BTRFS_I(inode)->dir_index = 0ULL;
6642 	inc_nlink(inode);
6643 	inode_inc_iversion(inode);
6644 	inode_set_ctime_current(inode);
6645 	ihold(inode);
6646 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6647 
6648 	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6649 			     &fname.disk_name, 1, index);
6650 
6651 	if (err) {
6652 		drop_inode = 1;
6653 	} else {
6654 		struct dentry *parent = dentry->d_parent;
6655 
6656 		err = btrfs_update_inode(trans, BTRFS_I(inode));
6657 		if (err)
6658 			goto fail;
6659 		if (inode->i_nlink == 1) {
6660 			/*
6661 			 * If new hard link count is 1, it's a file created
6662 			 * with open(2) O_TMPFILE flag.
6663 			 */
6664 			err = btrfs_orphan_del(trans, BTRFS_I(inode));
6665 			if (err)
6666 				goto fail;
6667 		}
6668 		d_instantiate(dentry, inode);
6669 		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
6670 	}
6671 
6672 fail:
6673 	fscrypt_free_filename(&fname);
6674 	if (trans)
6675 		btrfs_end_transaction(trans);
6676 	if (drop_inode) {
6677 		inode_dec_link_count(inode);
6678 		iput(inode);
6679 	}
6680 	btrfs_btree_balance_dirty(fs_info);
6681 	return err;
6682 }
6683 
6684 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
6685 		       struct dentry *dentry, umode_t mode)
6686 {
6687 	struct inode *inode;
6688 
6689 	inode = new_inode(dir->i_sb);
6690 	if (!inode)
6691 		return -ENOMEM;
6692 	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
6693 	inode->i_op = &btrfs_dir_inode_operations;
6694 	inode->i_fop = &btrfs_dir_file_operations;
6695 	return btrfs_create_common(dir, dentry, inode);
6696 }
6697 
6698 static noinline int uncompress_inline(struct btrfs_path *path,
6699 				      struct page *page,
6700 				      struct btrfs_file_extent_item *item)
6701 {
6702 	int ret;
6703 	struct extent_buffer *leaf = path->nodes[0];
6704 	char *tmp;
6705 	size_t max_size;
6706 	unsigned long inline_size;
6707 	unsigned long ptr;
6708 	int compress_type;
6709 
6710 	compress_type = btrfs_file_extent_compression(leaf, item);
6711 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6712 	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
6713 	tmp = kmalloc(inline_size, GFP_NOFS);
6714 	if (!tmp)
6715 		return -ENOMEM;
6716 	ptr = btrfs_file_extent_inline_start(item);
6717 
6718 	read_extent_buffer(leaf, tmp, ptr, inline_size);
6719 
6720 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6721 	ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);
6722 
6723 	/*
6724 	 * decompression code contains a memset to fill in any space between the end
6725 	 * of the uncompressed data and the end of max_size in case the decompressed
6726 	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6727 	 * the end of an inline extent and the beginning of the next block, so we
6728 	 * cover that region here.
6729 	 */
6730 
6731 	if (max_size < PAGE_SIZE)
6732 		memzero_page(page, max_size, PAGE_SIZE - max_size);
6733 	kfree(tmp);
6734 	return ret;
6735 }
6736 
6737 static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
6738 			      struct page *page)
6739 {
6740 	struct btrfs_file_extent_item *fi;
6741 	void *kaddr;
6742 	size_t copy_size;
6743 
6744 	if (!page || PageUptodate(page))
6745 		return 0;
6746 
6747 	ASSERT(page_offset(page) == 0);
6748 
6749 	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
6750 			    struct btrfs_file_extent_item);
6751 	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
6752 		return uncompress_inline(path, page, fi);
6753 
6754 	copy_size = min_t(u64, PAGE_SIZE,
6755 			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
6756 	kaddr = kmap_local_page(page);
6757 	read_extent_buffer(path->nodes[0], kaddr,
6758 			   btrfs_file_extent_inline_start(fi), copy_size);
6759 	kunmap_local(kaddr);
6760 	if (copy_size < PAGE_SIZE)
6761 		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
6762 	return 0;
6763 }
6764 
6765 /*
6766  * Lookup the first extent overlapping a range in a file.
6767  *
6768  * @inode:	file to search in
6769  * @page:	page to read extent data into if the extent is inline
6770  * @start:	file offset
6771  * @len:	length of range starting at @start
6772  *
6773  * Return the first &struct extent_map which overlaps the given range, reading
6774  * it from the B-tree and caching it if necessary. Note that there may be more
6775  * extents which overlap the given range after the returned extent_map.
6776  *
6777  * If @page is not NULL and the extent is inline, this also reads the extent
6778  * data directly into the page and marks the extent up to date in the io_tree.
6779  *
6780  * Return: ERR_PTR on error, non-NULL extent_map on success.
6781  */
6782 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6783 				    struct page *page, u64 start, u64 len)
6784 {
6785 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
6786 	int ret = 0;
6787 	u64 extent_start = 0;
6788 	u64 extent_end = 0;
6789 	u64 objectid = btrfs_ino(inode);
6790 	int extent_type = -1;
6791 	struct btrfs_path *path = NULL;
6792 	struct btrfs_root *root = inode->root;
6793 	struct btrfs_file_extent_item *item;
6794 	struct extent_buffer *leaf;
6795 	struct btrfs_key found_key;
6796 	struct extent_map *em = NULL;
6797 	struct extent_map_tree *em_tree = &inode->extent_tree;
6798 
6799 	read_lock(&em_tree->lock);
6800 	em = lookup_extent_mapping(em_tree, start, len);
6801 	read_unlock(&em_tree->lock);
6802 
6803 	if (em) {
6804 		if (em->start > start || em->start + em->len <= start)
6805 			free_extent_map(em);
6806 		else if (em->disk_bytenr == EXTENT_MAP_INLINE && page)
6807 			free_extent_map(em);
6808 		else
6809 			goto out;
6810 	}
6811 	em = alloc_extent_map();
6812 	if (!em) {
6813 		ret = -ENOMEM;
6814 		goto out;
6815 	}
6816 	em->start = EXTENT_MAP_HOLE;
6817 	em->disk_bytenr = EXTENT_MAP_HOLE;
6818 	em->len = (u64)-1;
6819 
6820 	path = btrfs_alloc_path();
6821 	if (!path) {
6822 		ret = -ENOMEM;
6823 		goto out;
6824 	}
6825 
6826 	/* Chances are we'll be called again, so go ahead and do readahead */
6827 	path->reada = READA_FORWARD;
6828 
6829 	/*
6830 	 * The same explanation in load_free_space_cache applies here as well,
6831 	 * we only read when we're loading the free space cache, and at that
6832 	 * point the commit_root has everything we need.
6833 	 */
6834 	if (btrfs_is_free_space_inode(inode)) {
6835 		path->search_commit_root = 1;
6836 		path->skip_locking = 1;
6837 	}
6838 
6839 	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6840 	if (ret < 0) {
6841 		goto out;
6842 	} else if (ret > 0) {
6843 		if (path->slots[0] == 0)
6844 			goto not_found;
6845 		path->slots[0]--;
6846 		ret = 0;
6847 	}
6848 
6849 	leaf = path->nodes[0];
6850 	item = btrfs_item_ptr(leaf, path->slots[0],
6851 			      struct btrfs_file_extent_item);
6852 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6853 	if (found_key.objectid != objectid ||
6854 	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
6855 		/*
6856 		 * If we backup past the first extent we want to move forward
6857 		 * and see if there is an extent in front of us, otherwise we'll
6858 		 * say there is a hole for our whole search range which can
6859 		 * cause problems.
6860 		 */
6861 		extent_end = start;
6862 		goto next;
6863 	}
6864 
6865 	extent_type = btrfs_file_extent_type(leaf, item);
6866 	extent_start = found_key.offset;
6867 	extent_end = btrfs_file_extent_end(path);
6868 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6869 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6870 		/* Only regular file could have regular/prealloc extent */
6871 		if (!S_ISREG(inode->vfs_inode.i_mode)) {
6872 			ret = -EUCLEAN;
6873 			btrfs_crit(fs_info,
6874 		"regular/prealloc extent found for non-regular inode %llu",
6875 				   btrfs_ino(inode));
6876 			goto out;
6877 		}
6878 		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6879 						       extent_start);
6880 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6881 		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6882 						      path->slots[0],
6883 						      extent_start);
6884 	}
6885 next:
6886 	if (start >= extent_end) {
6887 		path->slots[0]++;
6888 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6889 			ret = btrfs_next_leaf(root, path);
6890 			if (ret < 0)
6891 				goto out;
6892 			else if (ret > 0)
6893 				goto not_found;
6894 
6895 			leaf = path->nodes[0];
6896 		}
6897 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6898 		if (found_key.objectid != objectid ||
6899 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6900 			goto not_found;
6901 		if (start + len <= found_key.offset)
6902 			goto not_found;
6903 		if (start > found_key.offset)
6904 			goto next;
6905 
6906 		/* New extent overlaps with existing one */
6907 		em->start = start;
6908 		em->len = found_key.offset - start;
6909 		em->disk_bytenr = EXTENT_MAP_HOLE;
6910 		goto insert;
6911 	}
6912 
6913 	btrfs_extent_item_to_extent_map(inode, path, item, em);
6914 
6915 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6916 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6917 		goto insert;
6918 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6919 		/*
6920 		 * Inline extent can only exist at file offset 0. This is
6921 		 * ensured by tree-checker and inline extent creation path.
6922 		 * Thus all members representing file offsets should be zero.
6923 		 */
6924 		ASSERT(extent_start == 0);
6925 		ASSERT(em->start == 0);
6926 
6927 		/*
6928 		 * btrfs_extent_item_to_extent_map() should have properly
6929 		 * initialized em members already.
6930 		 *
6931 		 * Other members are not utilized for inline extents.
6932 		 */
6933 		ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
6934 		ASSERT(em->len == fs_info->sectorsize);
6935 
6936 		ret = read_inline_extent(inode, path, page);
6937 		if (ret < 0)
6938 			goto out;
6939 		goto insert;
6940 	}
6941 not_found:
6942 	em->start = start;
6943 	em->len = len;
6944 	em->disk_bytenr = EXTENT_MAP_HOLE;
6945 insert:
6946 	ret = 0;
6947 	btrfs_release_path(path);
6948 	if (em->start > start || extent_map_end(em) <= start) {
6949 		btrfs_err(fs_info,
6950 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
6951 			  em->start, em->len, start, len);
6952 		ret = -EIO;
6953 		goto out;
6954 	}
6955 
6956 	write_lock(&em_tree->lock);
6957 	ret = btrfs_add_extent_mapping(inode, &em, start, len);
6958 	write_unlock(&em_tree->lock);
6959 out:
6960 	btrfs_free_path(path);
6961 
6962 	trace_btrfs_get_extent(root, inode, em);
6963 
6964 	if (ret) {
6965 		free_extent_map(em);
6966 		return ERR_PTR(ret);
6967 	}
6968 	return em;
6969 }
6970 
6971 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
6972 {
6973 	struct btrfs_block_group *block_group;
6974 	bool readonly = false;
6975 
6976 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
6977 	if (!block_group || block_group->ro)
6978 		readonly = true;
6979 	if (block_group)
6980 		btrfs_put_block_group(block_group);
6981 	return readonly;
6982 }
6983 
6984 /*
6985  * Check if we can do nocow write into the range [@offset, @offset + @len)
6986  *
6987  * @offset:	File offset
6988  * @len:	The length to write, will be updated to the nocow writeable
6989  *		range
6990  * @orig_start:	(optional) Return the original file offset of the file extent
6991  * @orig_len:	(optional) Return the original on-disk length of the file extent
6992  * @ram_bytes:	(optional) Return the ram_bytes of the file extent
6993  * @strict:	if true, omit optimizations that might force us into unnecessary
6994  *		cow. e.g., don't trust generation number.
6995  *
6996  * Return:
6997  * >0	and update @len if we can do nocow write
6998  *  0	if we can't do nocow write
6999  * <0	if error happened
7000  *
7001  * NOTE: This only checks the file extents, caller is responsible to wait for
7002  *	 any ordered extents.
7003  */
7004 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7005 			      struct btrfs_file_extent *file_extent,
7006 			      bool nowait, bool strict)
7007 {
7008 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7009 	struct can_nocow_file_extent_args nocow_args = { 0 };
7010 	struct btrfs_path *path;
7011 	int ret;
7012 	struct extent_buffer *leaf;
7013 	struct btrfs_root *root = BTRFS_I(inode)->root;
7014 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7015 	struct btrfs_file_extent_item *fi;
7016 	struct btrfs_key key;
7017 	int found_type;
7018 
7019 	path = btrfs_alloc_path();
7020 	if (!path)
7021 		return -ENOMEM;
7022 	path->nowait = nowait;
7023 
7024 	ret = btrfs_lookup_file_extent(NULL, root, path,
7025 			btrfs_ino(BTRFS_I(inode)), offset, 0);
7026 	if (ret < 0)
7027 		goto out;
7028 
7029 	if (ret == 1) {
7030 		if (path->slots[0] == 0) {
7031 			/* can't find the item, must cow */
7032 			ret = 0;
7033 			goto out;
7034 		}
7035 		path->slots[0]--;
7036 	}
7037 	ret = 0;
7038 	leaf = path->nodes[0];
7039 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7040 	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7041 	    key.type != BTRFS_EXTENT_DATA_KEY) {
7042 		/* not our file or wrong item type, must cow */
7043 		goto out;
7044 	}
7045 
7046 	if (key.offset > offset) {
7047 		/* Wrong offset, must cow */
7048 		goto out;
7049 	}
7050 
7051 	if (btrfs_file_extent_end(path) <= offset)
7052 		goto out;
7053 
7054 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7055 	found_type = btrfs_file_extent_type(leaf, fi);
7056 
7057 	nocow_args.start = offset;
7058 	nocow_args.end = offset + *len - 1;
7059 	nocow_args.strict = strict;
7060 	nocow_args.free_path = true;
7061 
7062 	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
7063 	/* can_nocow_file_extent() has freed the path. */
7064 	path = NULL;
7065 
7066 	if (ret != 1) {
7067 		/* Treat errors as not being able to NOCOW. */
7068 		ret = 0;
7069 		goto out;
7070 	}
7071 
7072 	ret = 0;
7073 	if (btrfs_extent_readonly(fs_info,
7074 				  nocow_args.file_extent.disk_bytenr +
7075 				  nocow_args.file_extent.offset))
7076 		goto out;
7077 
7078 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7079 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7080 		u64 range_end;
7081 
7082 		range_end = round_up(offset + nocow_args.file_extent.num_bytes,
7083 				     root->fs_info->sectorsize) - 1;
7084 		ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
7085 		if (ret) {
7086 			ret = -EAGAIN;
7087 			goto out;
7088 		}
7089 	}
7090 
7091 	if (file_extent)
7092 		memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
7093 
7094 	*len = nocow_args.file_extent.num_bytes;
7095 	ret = 1;
7096 out:
7097 	btrfs_free_path(path);
7098 	return ret;
7099 }
7100 
7101 /* The callers of this must take lock_extent() */
7102 struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
7103 				      const struct btrfs_file_extent *file_extent,
7104 				      int type)
7105 {
7106 	struct extent_map *em;
7107 	int ret;
7108 
7109 	/*
7110 	 * Note the missing NOCOW type.
7111 	 *
7112 	 * For pure NOCOW writes, we should not create an io extent map, but
7113 	 * just reusing the existing one.
7114 	 * Only PREALLOC writes (NOCOW write into preallocated range) can
7115 	 * create an io extent map.
7116 	 */
7117 	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7118 	       type == BTRFS_ORDERED_COMPRESSED ||
7119 	       type == BTRFS_ORDERED_REGULAR);
7120 
7121 	switch (type) {
7122 	case BTRFS_ORDERED_PREALLOC:
7123 		/* We're only referring part of a larger preallocated extent. */
7124 		ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7125 		break;
7126 	case BTRFS_ORDERED_REGULAR:
7127 		/* COW results a new extent matching our file extent size. */
7128 		ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
7129 		ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
7130 
7131 		/* Since it's a new extent, we should not have any offset. */
7132 		ASSERT(file_extent->offset == 0);
7133 		break;
7134 	case BTRFS_ORDERED_COMPRESSED:
7135 		/* Must be compressed. */
7136 		ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
7137 
7138 		/*
7139 		 * Encoded write can make us to refer to part of the
7140 		 * uncompressed extent.
7141 		 */
7142 		ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7143 		break;
7144 	}
7145 
7146 	em = alloc_extent_map();
7147 	if (!em)
7148 		return ERR_PTR(-ENOMEM);
7149 
7150 	em->start = start;
7151 	em->len = file_extent->num_bytes;
7152 	em->disk_bytenr = file_extent->disk_bytenr;
7153 	em->disk_num_bytes = file_extent->disk_num_bytes;
7154 	em->ram_bytes = file_extent->ram_bytes;
7155 	em->generation = -1;
7156 	em->offset = file_extent->offset;
7157 	em->flags |= EXTENT_FLAG_PINNED;
7158 	if (type == BTRFS_ORDERED_COMPRESSED)
7159 		extent_map_set_compression(em, file_extent->compression);
7160 
7161 	ret = btrfs_replace_extent_map_range(inode, em, true);
7162 	if (ret) {
7163 		free_extent_map(em);
7164 		return ERR_PTR(ret);
7165 	}
7166 
7167 	/* em got 2 refs now, callers needs to do free_extent_map once. */
7168 	return em;
7169 }
7170 
7171 /*
7172  * For release_folio() and invalidate_folio() we have a race window where
7173  * folio_end_writeback() is called but the subpage spinlock is not yet released.
7174  * If we continue to release/invalidate the page, we could cause use-after-free
7175  * for subpage spinlock.  So this function is to spin and wait for subpage
7176  * spinlock.
7177  */
7178 static void wait_subpage_spinlock(struct page *page)
7179 {
7180 	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
7181 	struct folio *folio = page_folio(page);
7182 	struct btrfs_subpage *subpage;
7183 
7184 	if (!btrfs_is_subpage(fs_info, page->mapping))
7185 		return;
7186 
7187 	ASSERT(folio_test_private(folio) && folio_get_private(folio));
7188 	subpage = folio_get_private(folio);
7189 
7190 	/*
7191 	 * This may look insane as we just acquire the spinlock and release it,
7192 	 * without doing anything.  But we just want to make sure no one is
7193 	 * still holding the subpage spinlock.
7194 	 * And since the page is not dirty nor writeback, and we have page
7195 	 * locked, the only possible way to hold a spinlock is from the endio
7196 	 * function to clear page writeback.
7197 	 *
7198 	 * Here we just acquire the spinlock so that all existing callers
7199 	 * should exit and we're safe to release/invalidate the page.
7200 	 */
7201 	spin_lock_irq(&subpage->lock);
7202 	spin_unlock_irq(&subpage->lock);
7203 }
7204 
7205 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7206 {
7207 	if (try_release_extent_mapping(&folio->page, gfp_flags)) {
7208 		wait_subpage_spinlock(&folio->page);
7209 		clear_page_extent_mapped(&folio->page);
7210 		return true;
7211 	}
7212 	return false;
7213 }
7214 
7215 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7216 {
7217 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
7218 		return false;
7219 	return __btrfs_release_folio(folio, gfp_flags);
7220 }
7221 
7222 #ifdef CONFIG_MIGRATION
7223 static int btrfs_migrate_folio(struct address_space *mapping,
7224 			     struct folio *dst, struct folio *src,
7225 			     enum migrate_mode mode)
7226 {
7227 	int ret = filemap_migrate_folio(mapping, dst, src, mode);
7228 
7229 	if (ret != MIGRATEPAGE_SUCCESS)
7230 		return ret;
7231 
7232 	if (folio_test_ordered(src)) {
7233 		folio_clear_ordered(src);
7234 		folio_set_ordered(dst);
7235 	}
7236 
7237 	return MIGRATEPAGE_SUCCESS;
7238 }
7239 #else
7240 #define btrfs_migrate_folio NULL
7241 #endif
7242 
7243 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
7244 				 size_t length)
7245 {
7246 	struct btrfs_inode *inode = folio_to_inode(folio);
7247 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
7248 	struct extent_io_tree *tree = &inode->io_tree;
7249 	struct extent_state *cached_state = NULL;
7250 	u64 page_start = folio_pos(folio);
7251 	u64 page_end = page_start + folio_size(folio) - 1;
7252 	u64 cur;
7253 	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
7254 
7255 	/*
7256 	 * We have folio locked so no new ordered extent can be created on this
7257 	 * page, nor bio can be submitted for this folio.
7258 	 *
7259 	 * But already submitted bio can still be finished on this folio.
7260 	 * Furthermore, endio function won't skip folio which has Ordered
7261 	 * (Private2) already cleared, so it's possible for endio and
7262 	 * invalidate_folio to do the same ordered extent accounting twice
7263 	 * on one folio.
7264 	 *
7265 	 * So here we wait for any submitted bios to finish, so that we won't
7266 	 * do double ordered extent accounting on the same folio.
7267 	 */
7268 	folio_wait_writeback(folio);
7269 	wait_subpage_spinlock(&folio->page);
7270 
7271 	/*
7272 	 * For subpage case, we have call sites like
7273 	 * btrfs_punch_hole_lock_range() which passes range not aligned to
7274 	 * sectorsize.
7275 	 * If the range doesn't cover the full folio, we don't need to and
7276 	 * shouldn't clear page extent mapped, as folio->private can still
7277 	 * record subpage dirty bits for other part of the range.
7278 	 *
7279 	 * For cases that invalidate the full folio even the range doesn't
7280 	 * cover the full folio, like invalidating the last folio, we're
7281 	 * still safe to wait for ordered extent to finish.
7282 	 */
7283 	if (!(offset == 0 && length == folio_size(folio))) {
7284 		btrfs_release_folio(folio, GFP_NOFS);
7285 		return;
7286 	}
7287 
7288 	if (!inode_evicting)
7289 		lock_extent(tree, page_start, page_end, &cached_state);
7290 
7291 	cur = page_start;
7292 	while (cur < page_end) {
7293 		struct btrfs_ordered_extent *ordered;
7294 		u64 range_end;
7295 		u32 range_len;
7296 		u32 extra_flags = 0;
7297 
7298 		ordered = btrfs_lookup_first_ordered_range(inode, cur,
7299 							   page_end + 1 - cur);
7300 		if (!ordered) {
7301 			range_end = page_end;
7302 			/*
7303 			 * No ordered extent covering this range, we are safe
7304 			 * to delete all extent states in the range.
7305 			 */
7306 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7307 			goto next;
7308 		}
7309 		if (ordered->file_offset > cur) {
7310 			/*
7311 			 * There is a range between [cur, oe->file_offset) not
7312 			 * covered by any ordered extent.
7313 			 * We are safe to delete all extent states, and handle
7314 			 * the ordered extent in the next iteration.
7315 			 */
7316 			range_end = ordered->file_offset - 1;
7317 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7318 			goto next;
7319 		}
7320 
7321 		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
7322 				page_end);
7323 		ASSERT(range_end + 1 - cur < U32_MAX);
7324 		range_len = range_end + 1 - cur;
7325 		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
7326 			/*
7327 			 * If Ordered (Private2) is cleared, it means endio has
7328 			 * already been executed for the range.
7329 			 * We can't delete the extent states as
7330 			 * btrfs_finish_ordered_io() may still use some of them.
7331 			 */
7332 			goto next;
7333 		}
7334 		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
7335 
7336 		/*
7337 		 * IO on this page will never be started, so we need to account
7338 		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
7339 		 * here, must leave that up for the ordered extent completion.
7340 		 *
7341 		 * This will also unlock the range for incoming
7342 		 * btrfs_finish_ordered_io().
7343 		 */
7344 		if (!inode_evicting)
7345 			clear_extent_bit(tree, cur, range_end,
7346 					 EXTENT_DELALLOC |
7347 					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7348 					 EXTENT_DEFRAG, &cached_state);
7349 
7350 		spin_lock_irq(&inode->ordered_tree_lock);
7351 		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
7352 		ordered->truncated_len = min(ordered->truncated_len,
7353 					     cur - ordered->file_offset);
7354 		spin_unlock_irq(&inode->ordered_tree_lock);
7355 
7356 		/*
7357 		 * If the ordered extent has finished, we're safe to delete all
7358 		 * the extent states of the range, otherwise
7359 		 * btrfs_finish_ordered_io() will get executed by endio for
7360 		 * other pages, so we can't delete extent states.
7361 		 */
7362 		if (btrfs_dec_test_ordered_pending(inode, &ordered,
7363 						   cur, range_end + 1 - cur)) {
7364 			btrfs_finish_ordered_io(ordered);
7365 			/*
7366 			 * The ordered extent has finished, now we're again
7367 			 * safe to delete all extent states of the range.
7368 			 */
7369 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7370 		}
7371 next:
7372 		if (ordered)
7373 			btrfs_put_ordered_extent(ordered);
7374 		/*
7375 		 * Qgroup reserved space handler
7376 		 * Sector(s) here will be either:
7377 		 *
7378 		 * 1) Already written to disk or bio already finished
7379 		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
7380 		 *    Qgroup will be handled by its qgroup_record then.
7381 		 *    btrfs_qgroup_free_data() call will do nothing here.
7382 		 *
7383 		 * 2) Not written to disk yet
7384 		 *    Then btrfs_qgroup_free_data() call will clear the
7385 		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
7386 		 *    reserved data space.
7387 		 *    Since the IO will never happen for this page.
7388 		 */
7389 		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
7390 		if (!inode_evicting) {
7391 			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
7392 				 EXTENT_DELALLOC | EXTENT_UPTODATE |
7393 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
7394 				 extra_flags, &cached_state);
7395 		}
7396 		cur = range_end + 1;
7397 	}
7398 	/*
7399 	 * We have iterated through all ordered extents of the page, the page
7400 	 * should not have Ordered (Private2) anymore, or the above iteration
7401 	 * did something wrong.
7402 	 */
7403 	ASSERT(!folio_test_ordered(folio));
7404 	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
7405 	if (!inode_evicting)
7406 		__btrfs_release_folio(folio, GFP_NOFS);
7407 	clear_page_extent_mapped(&folio->page);
7408 }
7409 
7410 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
7411 {
7412 	struct btrfs_truncate_control control = {
7413 		.inode = inode,
7414 		.ino = btrfs_ino(inode),
7415 		.min_type = BTRFS_EXTENT_DATA_KEY,
7416 		.clear_extent_range = true,
7417 	};
7418 	struct btrfs_root *root = inode->root;
7419 	struct btrfs_fs_info *fs_info = root->fs_info;
7420 	struct btrfs_block_rsv *rsv;
7421 	int ret;
7422 	struct btrfs_trans_handle *trans;
7423 	u64 mask = fs_info->sectorsize - 1;
7424 	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
7425 
7426 	if (!skip_writeback) {
7427 		ret = btrfs_wait_ordered_range(inode,
7428 					       inode->vfs_inode.i_size & (~mask),
7429 					       (u64)-1);
7430 		if (ret)
7431 			return ret;
7432 	}
7433 
7434 	/*
7435 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
7436 	 * things going on here:
7437 	 *
7438 	 * 1) We need to reserve space to update our inode.
7439 	 *
7440 	 * 2) We need to have something to cache all the space that is going to
7441 	 * be free'd up by the truncate operation, but also have some slack
7442 	 * space reserved in case it uses space during the truncate (thank you
7443 	 * very much snapshotting).
7444 	 *
7445 	 * And we need these to be separate.  The fact is we can use a lot of
7446 	 * space doing the truncate, and we have no earthly idea how much space
7447 	 * we will use, so we need the truncate reservation to be separate so it
7448 	 * doesn't end up using space reserved for updating the inode.  We also
7449 	 * need to be able to stop the transaction and start a new one, which
7450 	 * means we need to be able to update the inode several times, and we
7451 	 * have no idea of knowing how many times that will be, so we can't just
7452 	 * reserve 1 item for the entirety of the operation, so that has to be
7453 	 * done separately as well.
7454 	 *
7455 	 * So that leaves us with
7456 	 *
7457 	 * 1) rsv - for the truncate reservation, which we will steal from the
7458 	 * transaction reservation.
7459 	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
7460 	 * updating the inode.
7461 	 */
7462 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
7463 	if (!rsv)
7464 		return -ENOMEM;
7465 	rsv->size = min_size;
7466 	rsv->failfast = true;
7467 
7468 	/*
7469 	 * 1 for the truncate slack space
7470 	 * 1 for updating the inode.
7471 	 */
7472 	trans = btrfs_start_transaction(root, 2);
7473 	if (IS_ERR(trans)) {
7474 		ret = PTR_ERR(trans);
7475 		goto out;
7476 	}
7477 
7478 	/* Migrate the slack space for the truncate to our reserve */
7479 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
7480 				      min_size, false);
7481 	/*
7482 	 * We have reserved 2 metadata units when we started the transaction and
7483 	 * min_size matches 1 unit, so this should never fail, but if it does,
7484 	 * it's not critical we just fail truncation.
7485 	 */
7486 	if (WARN_ON(ret)) {
7487 		btrfs_end_transaction(trans);
7488 		goto out;
7489 	}
7490 
7491 	trans->block_rsv = rsv;
7492 
7493 	while (1) {
7494 		struct extent_state *cached_state = NULL;
7495 		const u64 new_size = inode->vfs_inode.i_size;
7496 		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
7497 
7498 		control.new_size = new_size;
7499 		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7500 		/*
7501 		 * We want to drop from the next block forward in case this new
7502 		 * size is not block aligned since we will be keeping the last
7503 		 * block of the extent just the way it is.
7504 		 */
7505 		btrfs_drop_extent_map_range(inode,
7506 					    ALIGN(new_size, fs_info->sectorsize),
7507 					    (u64)-1, false);
7508 
7509 		ret = btrfs_truncate_inode_items(trans, root, &control);
7510 
7511 		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
7512 		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
7513 
7514 		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7515 
7516 		trans->block_rsv = &fs_info->trans_block_rsv;
7517 		if (ret != -ENOSPC && ret != -EAGAIN)
7518 			break;
7519 
7520 		ret = btrfs_update_inode(trans, inode);
7521 		if (ret)
7522 			break;
7523 
7524 		btrfs_end_transaction(trans);
7525 		btrfs_btree_balance_dirty(fs_info);
7526 
7527 		trans = btrfs_start_transaction(root, 2);
7528 		if (IS_ERR(trans)) {
7529 			ret = PTR_ERR(trans);
7530 			trans = NULL;
7531 			break;
7532 		}
7533 
7534 		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
7535 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
7536 					      rsv, min_size, false);
7537 		/*
7538 		 * We have reserved 2 metadata units when we started the
7539 		 * transaction and min_size matches 1 unit, so this should never
7540 		 * fail, but if it does, it's not critical we just fail truncation.
7541 		 */
7542 		if (WARN_ON(ret))
7543 			break;
7544 
7545 		trans->block_rsv = rsv;
7546 	}
7547 
7548 	/*
7549 	 * We can't call btrfs_truncate_block inside a trans handle as we could
7550 	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
7551 	 * know we've truncated everything except the last little bit, and can
7552 	 * do btrfs_truncate_block and then update the disk_i_size.
7553 	 */
7554 	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
7555 		btrfs_end_transaction(trans);
7556 		btrfs_btree_balance_dirty(fs_info);
7557 
7558 		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
7559 		if (ret)
7560 			goto out;
7561 		trans = btrfs_start_transaction(root, 1);
7562 		if (IS_ERR(trans)) {
7563 			ret = PTR_ERR(trans);
7564 			goto out;
7565 		}
7566 		btrfs_inode_safe_disk_i_size_write(inode, 0);
7567 	}
7568 
7569 	if (trans) {
7570 		int ret2;
7571 
7572 		trans->block_rsv = &fs_info->trans_block_rsv;
7573 		ret2 = btrfs_update_inode(trans, inode);
7574 		if (ret2 && !ret)
7575 			ret = ret2;
7576 
7577 		ret2 = btrfs_end_transaction(trans);
7578 		if (ret2 && !ret)
7579 			ret = ret2;
7580 		btrfs_btree_balance_dirty(fs_info);
7581 	}
7582 out:
7583 	btrfs_free_block_rsv(fs_info, rsv);
7584 	/*
7585 	 * So if we truncate and then write and fsync we normally would just
7586 	 * write the extents that changed, which is a problem if we need to
7587 	 * first truncate that entire inode.  So set this flag so we write out
7588 	 * all of the extents in the inode to the sync log so we're completely
7589 	 * safe.
7590 	 *
7591 	 * If no extents were dropped or trimmed we don't need to force the next
7592 	 * fsync to truncate all the inode's items from the log and re-log them
7593 	 * all. This means the truncate operation did not change the file size,
7594 	 * or changed it to a smaller size but there was only an implicit hole
7595 	 * between the old i_size and the new i_size, and there were no prealloc
7596 	 * extents beyond i_size to drop.
7597 	 */
7598 	if (control.extents_found > 0)
7599 		btrfs_set_inode_full_sync(inode);
7600 
7601 	return ret;
7602 }
7603 
7604 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
7605 				     struct inode *dir)
7606 {
7607 	struct inode *inode;
7608 
7609 	inode = new_inode(dir->i_sb);
7610 	if (inode) {
7611 		/*
7612 		 * Subvolumes don't inherit the sgid bit or the parent's gid if
7613 		 * the parent's sgid bit is set. This is probably a bug.
7614 		 */
7615 		inode_init_owner(idmap, inode, NULL,
7616 				 S_IFDIR | (~current_umask() & S_IRWXUGO));
7617 		inode->i_op = &btrfs_dir_inode_operations;
7618 		inode->i_fop = &btrfs_dir_file_operations;
7619 	}
7620 	return inode;
7621 }
7622 
7623 struct inode *btrfs_alloc_inode(struct super_block *sb)
7624 {
7625 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
7626 	struct btrfs_inode *ei;
7627 	struct inode *inode;
7628 
7629 	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
7630 	if (!ei)
7631 		return NULL;
7632 
7633 	ei->root = NULL;
7634 	ei->generation = 0;
7635 	ei->last_trans = 0;
7636 	ei->last_sub_trans = 0;
7637 	ei->logged_trans = 0;
7638 	ei->delalloc_bytes = 0;
7639 	ei->new_delalloc_bytes = 0;
7640 	ei->defrag_bytes = 0;
7641 	ei->disk_i_size = 0;
7642 	ei->flags = 0;
7643 	ei->ro_flags = 0;
7644 	/*
7645 	 * ->index_cnt will be properly initialized later when creating a new
7646 	 * inode (btrfs_create_new_inode()) or when reading an existing inode
7647 	 * from disk (btrfs_read_locked_inode()).
7648 	 */
7649 	ei->csum_bytes = 0;
7650 	ei->dir_index = 0;
7651 	ei->last_unlink_trans = 0;
7652 	ei->last_reflink_trans = 0;
7653 	ei->last_log_commit = 0;
7654 
7655 	spin_lock_init(&ei->lock);
7656 	ei->outstanding_extents = 0;
7657 	if (sb->s_magic != BTRFS_TEST_MAGIC)
7658 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
7659 					      BTRFS_BLOCK_RSV_DELALLOC);
7660 	ei->runtime_flags = 0;
7661 	ei->prop_compress = BTRFS_COMPRESS_NONE;
7662 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
7663 
7664 	ei->delayed_node = NULL;
7665 
7666 	ei->i_otime_sec = 0;
7667 	ei->i_otime_nsec = 0;
7668 
7669 	inode = &ei->vfs_inode;
7670 	extent_map_tree_init(&ei->extent_tree);
7671 
7672 	/* This io tree sets the valid inode. */
7673 	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
7674 	ei->io_tree.inode = ei;
7675 
7676 	ei->file_extent_tree = NULL;
7677 
7678 	mutex_init(&ei->log_mutex);
7679 	spin_lock_init(&ei->ordered_tree_lock);
7680 	ei->ordered_tree = RB_ROOT;
7681 	ei->ordered_tree_last = NULL;
7682 	INIT_LIST_HEAD(&ei->delalloc_inodes);
7683 	INIT_LIST_HEAD(&ei->delayed_iput);
7684 	init_rwsem(&ei->i_mmap_lock);
7685 
7686 	return inode;
7687 }
7688 
7689 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
7690 void btrfs_test_destroy_inode(struct inode *inode)
7691 {
7692 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
7693 	kfree(BTRFS_I(inode)->file_extent_tree);
7694 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7695 }
7696 #endif
7697 
7698 void btrfs_free_inode(struct inode *inode)
7699 {
7700 	kfree(BTRFS_I(inode)->file_extent_tree);
7701 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7702 }
7703 
7704 void btrfs_destroy_inode(struct inode *vfs_inode)
7705 {
7706 	struct btrfs_ordered_extent *ordered;
7707 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
7708 	struct btrfs_root *root = inode->root;
7709 	bool freespace_inode;
7710 
7711 	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
7712 	WARN_ON(vfs_inode->i_data.nrpages);
7713 	WARN_ON(inode->block_rsv.reserved);
7714 	WARN_ON(inode->block_rsv.size);
7715 	WARN_ON(inode->outstanding_extents);
7716 	if (!S_ISDIR(vfs_inode->i_mode)) {
7717 		WARN_ON(inode->delalloc_bytes);
7718 		WARN_ON(inode->new_delalloc_bytes);
7719 		WARN_ON(inode->csum_bytes);
7720 	}
7721 	if (!root || !btrfs_is_data_reloc_root(root))
7722 		WARN_ON(inode->defrag_bytes);
7723 
7724 	/*
7725 	 * This can happen where we create an inode, but somebody else also
7726 	 * created the same inode and we need to destroy the one we already
7727 	 * created.
7728 	 */
7729 	if (!root)
7730 		return;
7731 
7732 	/*
7733 	 * If this is a free space inode do not take the ordered extents lockdep
7734 	 * map.
7735 	 */
7736 	freespace_inode = btrfs_is_free_space_inode(inode);
7737 
7738 	while (1) {
7739 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7740 		if (!ordered)
7741 			break;
7742 		else {
7743 			btrfs_err(root->fs_info,
7744 				  "found ordered extent %llu %llu on inode cleanup",
7745 				  ordered->file_offset, ordered->num_bytes);
7746 
7747 			if (!freespace_inode)
7748 				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
7749 
7750 			btrfs_remove_ordered_extent(inode, ordered);
7751 			btrfs_put_ordered_extent(ordered);
7752 			btrfs_put_ordered_extent(ordered);
7753 		}
7754 	}
7755 	btrfs_qgroup_check_reserved_leak(inode);
7756 	btrfs_del_inode_from_root(inode);
7757 	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
7758 	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
7759 	btrfs_put_root(inode->root);
7760 }
7761 
7762 int btrfs_drop_inode(struct inode *inode)
7763 {
7764 	struct btrfs_root *root = BTRFS_I(inode)->root;
7765 
7766 	if (root == NULL)
7767 		return 1;
7768 
7769 	/* the snap/subvol tree is on deleting */
7770 	if (btrfs_root_refs(&root->root_item) == 0)
7771 		return 1;
7772 	else
7773 		return generic_drop_inode(inode);
7774 }
7775 
7776 static void init_once(void *foo)
7777 {
7778 	struct btrfs_inode *ei = foo;
7779 
7780 	inode_init_once(&ei->vfs_inode);
7781 }
7782 
7783 void __cold btrfs_destroy_cachep(void)
7784 {
7785 	/*
7786 	 * Make sure all delayed rcu free inodes are flushed before we
7787 	 * destroy cache.
7788 	 */
7789 	rcu_barrier();
7790 	kmem_cache_destroy(btrfs_inode_cachep);
7791 }
7792 
7793 int __init btrfs_init_cachep(void)
7794 {
7795 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
7796 			sizeof(struct btrfs_inode), 0,
7797 			SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
7798 			init_once);
7799 	if (!btrfs_inode_cachep)
7800 		return -ENOMEM;
7801 
7802 	return 0;
7803 }
7804 
7805 static int btrfs_getattr(struct mnt_idmap *idmap,
7806 			 const struct path *path, struct kstat *stat,
7807 			 u32 request_mask, unsigned int flags)
7808 {
7809 	u64 delalloc_bytes;
7810 	u64 inode_bytes;
7811 	struct inode *inode = d_inode(path->dentry);
7812 	u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
7813 	u32 bi_flags = BTRFS_I(inode)->flags;
7814 	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
7815 
7816 	stat->result_mask |= STATX_BTIME;
7817 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
7818 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
7819 	if (bi_flags & BTRFS_INODE_APPEND)
7820 		stat->attributes |= STATX_ATTR_APPEND;
7821 	if (bi_flags & BTRFS_INODE_COMPRESS)
7822 		stat->attributes |= STATX_ATTR_COMPRESSED;
7823 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
7824 		stat->attributes |= STATX_ATTR_IMMUTABLE;
7825 	if (bi_flags & BTRFS_INODE_NODUMP)
7826 		stat->attributes |= STATX_ATTR_NODUMP;
7827 	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
7828 		stat->attributes |= STATX_ATTR_VERITY;
7829 
7830 	stat->attributes_mask |= (STATX_ATTR_APPEND |
7831 				  STATX_ATTR_COMPRESSED |
7832 				  STATX_ATTR_IMMUTABLE |
7833 				  STATX_ATTR_NODUMP);
7834 
7835 	generic_fillattr(idmap, request_mask, inode, stat);
7836 	stat->dev = BTRFS_I(inode)->root->anon_dev;
7837 
7838 	stat->subvol = BTRFS_I(inode)->root->root_key.objectid;
7839 	stat->result_mask |= STATX_SUBVOL;
7840 
7841 	spin_lock(&BTRFS_I(inode)->lock);
7842 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
7843 	inode_bytes = inode_get_bytes(inode);
7844 	spin_unlock(&BTRFS_I(inode)->lock);
7845 	stat->blocks = (ALIGN(inode_bytes, blocksize) +
7846 			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
7847 	return 0;
7848 }
7849 
7850 static int btrfs_rename_exchange(struct inode *old_dir,
7851 			      struct dentry *old_dentry,
7852 			      struct inode *new_dir,
7853 			      struct dentry *new_dentry)
7854 {
7855 	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
7856 	struct btrfs_trans_handle *trans;
7857 	unsigned int trans_num_items;
7858 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
7859 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
7860 	struct inode *new_inode = new_dentry->d_inode;
7861 	struct inode *old_inode = old_dentry->d_inode;
7862 	struct btrfs_rename_ctx old_rename_ctx;
7863 	struct btrfs_rename_ctx new_rename_ctx;
7864 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
7865 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
7866 	u64 old_idx = 0;
7867 	u64 new_idx = 0;
7868 	int ret;
7869 	int ret2;
7870 	bool need_abort = false;
7871 	struct fscrypt_name old_fname, new_fname;
7872 	struct fscrypt_str *old_name, *new_name;
7873 
7874 	/*
7875 	 * For non-subvolumes allow exchange only within one subvolume, in the
7876 	 * same inode namespace. Two subvolumes (represented as directory) can
7877 	 * be exchanged as they're a logical link and have a fixed inode number.
7878 	 */
7879 	if (root != dest &&
7880 	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
7881 	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
7882 		return -EXDEV;
7883 
7884 	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
7885 	if (ret)
7886 		return ret;
7887 
7888 	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
7889 	if (ret) {
7890 		fscrypt_free_filename(&old_fname);
7891 		return ret;
7892 	}
7893 
7894 	old_name = &old_fname.disk_name;
7895 	new_name = &new_fname.disk_name;
7896 
7897 	/* close the race window with snapshot create/destroy ioctl */
7898 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
7899 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
7900 		down_read(&fs_info->subvol_sem);
7901 
7902 	/*
7903 	 * For each inode:
7904 	 * 1 to remove old dir item
7905 	 * 1 to remove old dir index
7906 	 * 1 to add new dir item
7907 	 * 1 to add new dir index
7908 	 * 1 to update parent inode
7909 	 *
7910 	 * If the parents are the same, we only need to account for one
7911 	 */
7912 	trans_num_items = (old_dir == new_dir ? 9 : 10);
7913 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
7914 		/*
7915 		 * 1 to remove old root ref
7916 		 * 1 to remove old root backref
7917 		 * 1 to add new root ref
7918 		 * 1 to add new root backref
7919 		 */
7920 		trans_num_items += 4;
7921 	} else {
7922 		/*
7923 		 * 1 to update inode item
7924 		 * 1 to remove old inode ref
7925 		 * 1 to add new inode ref
7926 		 */
7927 		trans_num_items += 3;
7928 	}
7929 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
7930 		trans_num_items += 4;
7931 	else
7932 		trans_num_items += 3;
7933 	trans = btrfs_start_transaction(root, trans_num_items);
7934 	if (IS_ERR(trans)) {
7935 		ret = PTR_ERR(trans);
7936 		goto out_notrans;
7937 	}
7938 
7939 	if (dest != root) {
7940 		ret = btrfs_record_root_in_trans(trans, dest);
7941 		if (ret)
7942 			goto out_fail;
7943 	}
7944 
7945 	/*
7946 	 * We need to find a free sequence number both in the source and
7947 	 * in the destination directory for the exchange.
7948 	 */
7949 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
7950 	if (ret)
7951 		goto out_fail;
7952 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
7953 	if (ret)
7954 		goto out_fail;
7955 
7956 	BTRFS_I(old_inode)->dir_index = 0ULL;
7957 	BTRFS_I(new_inode)->dir_index = 0ULL;
7958 
7959 	/* Reference for the source. */
7960 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
7961 		/* force full log commit if subvolume involved. */
7962 		btrfs_set_log_full_commit(trans);
7963 	} else {
7964 		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
7965 					     btrfs_ino(BTRFS_I(new_dir)),
7966 					     old_idx);
7967 		if (ret)
7968 			goto out_fail;
7969 		need_abort = true;
7970 	}
7971 
7972 	/* And now for the dest. */
7973 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
7974 		/* force full log commit if subvolume involved. */
7975 		btrfs_set_log_full_commit(trans);
7976 	} else {
7977 		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
7978 					     btrfs_ino(BTRFS_I(old_dir)),
7979 					     new_idx);
7980 		if (ret) {
7981 			if (need_abort)
7982 				btrfs_abort_transaction(trans, ret);
7983 			goto out_fail;
7984 		}
7985 	}
7986 
7987 	/* Update inode version and ctime/mtime. */
7988 	inode_inc_iversion(old_dir);
7989 	inode_inc_iversion(new_dir);
7990 	inode_inc_iversion(old_inode);
7991 	inode_inc_iversion(new_inode);
7992 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
7993 
7994 	if (old_dentry->d_parent != new_dentry->d_parent) {
7995 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
7996 					BTRFS_I(old_inode), true);
7997 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
7998 					BTRFS_I(new_inode), true);
7999 	}
8000 
8001 	/* src is a subvolume */
8002 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8003 		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8004 	} else { /* src is an inode */
8005 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8006 					   BTRFS_I(old_dentry->d_inode),
8007 					   old_name, &old_rename_ctx);
8008 		if (!ret)
8009 			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8010 	}
8011 	if (ret) {
8012 		btrfs_abort_transaction(trans, ret);
8013 		goto out_fail;
8014 	}
8015 
8016 	/* dest is a subvolume */
8017 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8018 		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8019 	} else { /* dest is an inode */
8020 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8021 					   BTRFS_I(new_dentry->d_inode),
8022 					   new_name, &new_rename_ctx);
8023 		if (!ret)
8024 			ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8025 	}
8026 	if (ret) {
8027 		btrfs_abort_transaction(trans, ret);
8028 		goto out_fail;
8029 	}
8030 
8031 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8032 			     new_name, 0, old_idx);
8033 	if (ret) {
8034 		btrfs_abort_transaction(trans, ret);
8035 		goto out_fail;
8036 	}
8037 
8038 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8039 			     old_name, 0, new_idx);
8040 	if (ret) {
8041 		btrfs_abort_transaction(trans, ret);
8042 		goto out_fail;
8043 	}
8044 
8045 	if (old_inode->i_nlink == 1)
8046 		BTRFS_I(old_inode)->dir_index = old_idx;
8047 	if (new_inode->i_nlink == 1)
8048 		BTRFS_I(new_inode)->dir_index = new_idx;
8049 
8050 	/*
8051 	 * Now pin the logs of the roots. We do it to ensure that no other task
8052 	 * can sync the logs while we are in progress with the rename, because
8053 	 * that could result in an inconsistency in case any of the inodes that
8054 	 * are part of this rename operation were logged before.
8055 	 */
8056 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8057 		btrfs_pin_log_trans(root);
8058 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8059 		btrfs_pin_log_trans(dest);
8060 
8061 	/* Do the log updates for all inodes. */
8062 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8063 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8064 				   old_rename_ctx.index, new_dentry->d_parent);
8065 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8066 		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
8067 				   new_rename_ctx.index, old_dentry->d_parent);
8068 
8069 	/* Now unpin the logs. */
8070 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8071 		btrfs_end_log_trans(root);
8072 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8073 		btrfs_end_log_trans(dest);
8074 out_fail:
8075 	ret2 = btrfs_end_transaction(trans);
8076 	ret = ret ? ret : ret2;
8077 out_notrans:
8078 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
8079 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
8080 		up_read(&fs_info->subvol_sem);
8081 
8082 	fscrypt_free_filename(&new_fname);
8083 	fscrypt_free_filename(&old_fname);
8084 	return ret;
8085 }
8086 
8087 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
8088 					struct inode *dir)
8089 {
8090 	struct inode *inode;
8091 
8092 	inode = new_inode(dir->i_sb);
8093 	if (inode) {
8094 		inode_init_owner(idmap, inode, dir,
8095 				 S_IFCHR | WHITEOUT_MODE);
8096 		inode->i_op = &btrfs_special_inode_operations;
8097 		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
8098 	}
8099 	return inode;
8100 }
8101 
8102 static int btrfs_rename(struct mnt_idmap *idmap,
8103 			struct inode *old_dir, struct dentry *old_dentry,
8104 			struct inode *new_dir, struct dentry *new_dentry,
8105 			unsigned int flags)
8106 {
8107 	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8108 	struct btrfs_new_inode_args whiteout_args = {
8109 		.dir = old_dir,
8110 		.dentry = old_dentry,
8111 	};
8112 	struct btrfs_trans_handle *trans;
8113 	unsigned int trans_num_items;
8114 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8115 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8116 	struct inode *new_inode = d_inode(new_dentry);
8117 	struct inode *old_inode = d_inode(old_dentry);
8118 	struct btrfs_rename_ctx rename_ctx;
8119 	u64 index = 0;
8120 	int ret;
8121 	int ret2;
8122 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8123 	struct fscrypt_name old_fname, new_fname;
8124 
8125 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8126 		return -EPERM;
8127 
8128 	/* we only allow rename subvolume link between subvolumes */
8129 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8130 		return -EXDEV;
8131 
8132 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8133 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
8134 		return -ENOTEMPTY;
8135 
8136 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
8137 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8138 		return -ENOTEMPTY;
8139 
8140 	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8141 	if (ret)
8142 		return ret;
8143 
8144 	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8145 	if (ret) {
8146 		fscrypt_free_filename(&old_fname);
8147 		return ret;
8148 	}
8149 
8150 	/* check for collisions, even if the  name isn't there */
8151 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
8152 	if (ret) {
8153 		if (ret == -EEXIST) {
8154 			/* we shouldn't get
8155 			 * eexist without a new_inode */
8156 			if (WARN_ON(!new_inode)) {
8157 				goto out_fscrypt_names;
8158 			}
8159 		} else {
8160 			/* maybe -EOVERFLOW */
8161 			goto out_fscrypt_names;
8162 		}
8163 	}
8164 	ret = 0;
8165 
8166 	/*
8167 	 * we're using rename to replace one file with another.  Start IO on it
8168 	 * now so  we don't add too much work to the end of the transaction
8169 	 */
8170 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
8171 		filemap_flush(old_inode->i_mapping);
8172 
8173 	if (flags & RENAME_WHITEOUT) {
8174 		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
8175 		if (!whiteout_args.inode) {
8176 			ret = -ENOMEM;
8177 			goto out_fscrypt_names;
8178 		}
8179 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
8180 		if (ret)
8181 			goto out_whiteout_inode;
8182 	} else {
8183 		/* 1 to update the old parent inode. */
8184 		trans_num_items = 1;
8185 	}
8186 
8187 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8188 		/* Close the race window with snapshot create/destroy ioctl */
8189 		down_read(&fs_info->subvol_sem);
8190 		/*
8191 		 * 1 to remove old root ref
8192 		 * 1 to remove old root backref
8193 		 * 1 to add new root ref
8194 		 * 1 to add new root backref
8195 		 */
8196 		trans_num_items += 4;
8197 	} else {
8198 		/*
8199 		 * 1 to update inode
8200 		 * 1 to remove old inode ref
8201 		 * 1 to add new inode ref
8202 		 */
8203 		trans_num_items += 3;
8204 	}
8205 	/*
8206 	 * 1 to remove old dir item
8207 	 * 1 to remove old dir index
8208 	 * 1 to add new dir item
8209 	 * 1 to add new dir index
8210 	 */
8211 	trans_num_items += 4;
8212 	/* 1 to update new parent inode if it's not the same as the old parent */
8213 	if (new_dir != old_dir)
8214 		trans_num_items++;
8215 	if (new_inode) {
8216 		/*
8217 		 * 1 to update inode
8218 		 * 1 to remove inode ref
8219 		 * 1 to remove dir item
8220 		 * 1 to remove dir index
8221 		 * 1 to possibly add orphan item
8222 		 */
8223 		trans_num_items += 5;
8224 	}
8225 	trans = btrfs_start_transaction(root, trans_num_items);
8226 	if (IS_ERR(trans)) {
8227 		ret = PTR_ERR(trans);
8228 		goto out_notrans;
8229 	}
8230 
8231 	if (dest != root) {
8232 		ret = btrfs_record_root_in_trans(trans, dest);
8233 		if (ret)
8234 			goto out_fail;
8235 	}
8236 
8237 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
8238 	if (ret)
8239 		goto out_fail;
8240 
8241 	BTRFS_I(old_inode)->dir_index = 0ULL;
8242 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8243 		/* force full log commit if subvolume involved. */
8244 		btrfs_set_log_full_commit(trans);
8245 	} else {
8246 		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
8247 					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
8248 					     index);
8249 		if (ret)
8250 			goto out_fail;
8251 	}
8252 
8253 	inode_inc_iversion(old_dir);
8254 	inode_inc_iversion(new_dir);
8255 	inode_inc_iversion(old_inode);
8256 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8257 
8258 	if (old_dentry->d_parent != new_dentry->d_parent)
8259 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8260 					BTRFS_I(old_inode), true);
8261 
8262 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8263 		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8264 	} else {
8265 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8266 					   BTRFS_I(d_inode(old_dentry)),
8267 					   &old_fname.disk_name, &rename_ctx);
8268 		if (!ret)
8269 			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8270 	}
8271 	if (ret) {
8272 		btrfs_abort_transaction(trans, ret);
8273 		goto out_fail;
8274 	}
8275 
8276 	if (new_inode) {
8277 		inode_inc_iversion(new_inode);
8278 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
8279 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8280 			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8281 			BUG_ON(new_inode->i_nlink == 0);
8282 		} else {
8283 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8284 						 BTRFS_I(d_inode(new_dentry)),
8285 						 &new_fname.disk_name);
8286 		}
8287 		if (!ret && new_inode->i_nlink == 0)
8288 			ret = btrfs_orphan_add(trans,
8289 					BTRFS_I(d_inode(new_dentry)));
8290 		if (ret) {
8291 			btrfs_abort_transaction(trans, ret);
8292 			goto out_fail;
8293 		}
8294 	}
8295 
8296 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8297 			     &new_fname.disk_name, 0, index);
8298 	if (ret) {
8299 		btrfs_abort_transaction(trans, ret);
8300 		goto out_fail;
8301 	}
8302 
8303 	if (old_inode->i_nlink == 1)
8304 		BTRFS_I(old_inode)->dir_index = index;
8305 
8306 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8307 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8308 				   rename_ctx.index, new_dentry->d_parent);
8309 
8310 	if (flags & RENAME_WHITEOUT) {
8311 		ret = btrfs_create_new_inode(trans, &whiteout_args);
8312 		if (ret) {
8313 			btrfs_abort_transaction(trans, ret);
8314 			goto out_fail;
8315 		} else {
8316 			unlock_new_inode(whiteout_args.inode);
8317 			iput(whiteout_args.inode);
8318 			whiteout_args.inode = NULL;
8319 		}
8320 	}
8321 out_fail:
8322 	ret2 = btrfs_end_transaction(trans);
8323 	ret = ret ? ret : ret2;
8324 out_notrans:
8325 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8326 		up_read(&fs_info->subvol_sem);
8327 	if (flags & RENAME_WHITEOUT)
8328 		btrfs_new_inode_args_destroy(&whiteout_args);
8329 out_whiteout_inode:
8330 	if (flags & RENAME_WHITEOUT)
8331 		iput(whiteout_args.inode);
8332 out_fscrypt_names:
8333 	fscrypt_free_filename(&old_fname);
8334 	fscrypt_free_filename(&new_fname);
8335 	return ret;
8336 }
8337 
8338 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
8339 			 struct dentry *old_dentry, struct inode *new_dir,
8340 			 struct dentry *new_dentry, unsigned int flags)
8341 {
8342 	int ret;
8343 
8344 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
8345 		return -EINVAL;
8346 
8347 	if (flags & RENAME_EXCHANGE)
8348 		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
8349 					    new_dentry);
8350 	else
8351 		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
8352 				   new_dentry, flags);
8353 
8354 	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
8355 
8356 	return ret;
8357 }
8358 
8359 struct btrfs_delalloc_work {
8360 	struct inode *inode;
8361 	struct completion completion;
8362 	struct list_head list;
8363 	struct btrfs_work work;
8364 };
8365 
8366 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8367 {
8368 	struct btrfs_delalloc_work *delalloc_work;
8369 	struct inode *inode;
8370 
8371 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
8372 				     work);
8373 	inode = delalloc_work->inode;
8374 	filemap_flush(inode->i_mapping);
8375 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8376 				&BTRFS_I(inode)->runtime_flags))
8377 		filemap_flush(inode->i_mapping);
8378 
8379 	iput(inode);
8380 	complete(&delalloc_work->completion);
8381 }
8382 
8383 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
8384 {
8385 	struct btrfs_delalloc_work *work;
8386 
8387 	work = kmalloc(sizeof(*work), GFP_NOFS);
8388 	if (!work)
8389 		return NULL;
8390 
8391 	init_completion(&work->completion);
8392 	INIT_LIST_HEAD(&work->list);
8393 	work->inode = inode;
8394 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
8395 
8396 	return work;
8397 }
8398 
8399 /*
8400  * some fairly slow code that needs optimization. This walks the list
8401  * of all the inodes with pending delalloc and forces them to disk.
8402  */
8403 static int start_delalloc_inodes(struct btrfs_root *root,
8404 				 struct writeback_control *wbc, bool snapshot,
8405 				 bool in_reclaim_context)
8406 {
8407 	struct btrfs_inode *binode;
8408 	struct inode *inode;
8409 	struct btrfs_delalloc_work *work, *next;
8410 	LIST_HEAD(works);
8411 	LIST_HEAD(splice);
8412 	int ret = 0;
8413 	bool full_flush = wbc->nr_to_write == LONG_MAX;
8414 
8415 	mutex_lock(&root->delalloc_mutex);
8416 	spin_lock(&root->delalloc_lock);
8417 	list_splice_init(&root->delalloc_inodes, &splice);
8418 	while (!list_empty(&splice)) {
8419 		binode = list_entry(splice.next, struct btrfs_inode,
8420 				    delalloc_inodes);
8421 
8422 		list_move_tail(&binode->delalloc_inodes,
8423 			       &root->delalloc_inodes);
8424 
8425 		if (in_reclaim_context &&
8426 		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
8427 			continue;
8428 
8429 		inode = igrab(&binode->vfs_inode);
8430 		if (!inode) {
8431 			cond_resched_lock(&root->delalloc_lock);
8432 			continue;
8433 		}
8434 		spin_unlock(&root->delalloc_lock);
8435 
8436 		if (snapshot)
8437 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
8438 				&binode->runtime_flags);
8439 		if (full_flush) {
8440 			work = btrfs_alloc_delalloc_work(inode);
8441 			if (!work) {
8442 				iput(inode);
8443 				ret = -ENOMEM;
8444 				goto out;
8445 			}
8446 			list_add_tail(&work->list, &works);
8447 			btrfs_queue_work(root->fs_info->flush_workers,
8448 					 &work->work);
8449 		} else {
8450 			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
8451 			btrfs_add_delayed_iput(BTRFS_I(inode));
8452 			if (ret || wbc->nr_to_write <= 0)
8453 				goto out;
8454 		}
8455 		cond_resched();
8456 		spin_lock(&root->delalloc_lock);
8457 	}
8458 	spin_unlock(&root->delalloc_lock);
8459 
8460 out:
8461 	list_for_each_entry_safe(work, next, &works, list) {
8462 		list_del_init(&work->list);
8463 		wait_for_completion(&work->completion);
8464 		kfree(work);
8465 	}
8466 
8467 	if (!list_empty(&splice)) {
8468 		spin_lock(&root->delalloc_lock);
8469 		list_splice_tail(&splice, &root->delalloc_inodes);
8470 		spin_unlock(&root->delalloc_lock);
8471 	}
8472 	mutex_unlock(&root->delalloc_mutex);
8473 	return ret;
8474 }
8475 
8476 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
8477 {
8478 	struct writeback_control wbc = {
8479 		.nr_to_write = LONG_MAX,
8480 		.sync_mode = WB_SYNC_NONE,
8481 		.range_start = 0,
8482 		.range_end = LLONG_MAX,
8483 	};
8484 	struct btrfs_fs_info *fs_info = root->fs_info;
8485 
8486 	if (BTRFS_FS_ERROR(fs_info))
8487 		return -EROFS;
8488 
8489 	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
8490 }
8491 
8492 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
8493 			       bool in_reclaim_context)
8494 {
8495 	struct writeback_control wbc = {
8496 		.nr_to_write = nr,
8497 		.sync_mode = WB_SYNC_NONE,
8498 		.range_start = 0,
8499 		.range_end = LLONG_MAX,
8500 	};
8501 	struct btrfs_root *root;
8502 	LIST_HEAD(splice);
8503 	int ret;
8504 
8505 	if (BTRFS_FS_ERROR(fs_info))
8506 		return -EROFS;
8507 
8508 	mutex_lock(&fs_info->delalloc_root_mutex);
8509 	spin_lock(&fs_info->delalloc_root_lock);
8510 	list_splice_init(&fs_info->delalloc_roots, &splice);
8511 	while (!list_empty(&splice)) {
8512 		/*
8513 		 * Reset nr_to_write here so we know that we're doing a full
8514 		 * flush.
8515 		 */
8516 		if (nr == LONG_MAX)
8517 			wbc.nr_to_write = LONG_MAX;
8518 
8519 		root = list_first_entry(&splice, struct btrfs_root,
8520 					delalloc_root);
8521 		root = btrfs_grab_root(root);
8522 		BUG_ON(!root);
8523 		list_move_tail(&root->delalloc_root,
8524 			       &fs_info->delalloc_roots);
8525 		spin_unlock(&fs_info->delalloc_root_lock);
8526 
8527 		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
8528 		btrfs_put_root(root);
8529 		if (ret < 0 || wbc.nr_to_write <= 0)
8530 			goto out;
8531 		spin_lock(&fs_info->delalloc_root_lock);
8532 	}
8533 	spin_unlock(&fs_info->delalloc_root_lock);
8534 
8535 	ret = 0;
8536 out:
8537 	if (!list_empty(&splice)) {
8538 		spin_lock(&fs_info->delalloc_root_lock);
8539 		list_splice_tail(&splice, &fs_info->delalloc_roots);
8540 		spin_unlock(&fs_info->delalloc_root_lock);
8541 	}
8542 	mutex_unlock(&fs_info->delalloc_root_mutex);
8543 	return ret;
8544 }
8545 
8546 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
8547 			 struct dentry *dentry, const char *symname)
8548 {
8549 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
8550 	struct btrfs_trans_handle *trans;
8551 	struct btrfs_root *root = BTRFS_I(dir)->root;
8552 	struct btrfs_path *path;
8553 	struct btrfs_key key;
8554 	struct inode *inode;
8555 	struct btrfs_new_inode_args new_inode_args = {
8556 		.dir = dir,
8557 		.dentry = dentry,
8558 	};
8559 	unsigned int trans_num_items;
8560 	int err;
8561 	int name_len;
8562 	int datasize;
8563 	unsigned long ptr;
8564 	struct btrfs_file_extent_item *ei;
8565 	struct extent_buffer *leaf;
8566 
8567 	name_len = strlen(symname);
8568 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
8569 		return -ENAMETOOLONG;
8570 
8571 	inode = new_inode(dir->i_sb);
8572 	if (!inode)
8573 		return -ENOMEM;
8574 	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
8575 	inode->i_op = &btrfs_symlink_inode_operations;
8576 	inode_nohighmem(inode);
8577 	inode->i_mapping->a_ops = &btrfs_aops;
8578 	btrfs_i_size_write(BTRFS_I(inode), name_len);
8579 	inode_set_bytes(inode, name_len);
8580 
8581 	new_inode_args.inode = inode;
8582 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
8583 	if (err)
8584 		goto out_inode;
8585 	/* 1 additional item for the inline extent */
8586 	trans_num_items++;
8587 
8588 	trans = btrfs_start_transaction(root, trans_num_items);
8589 	if (IS_ERR(trans)) {
8590 		err = PTR_ERR(trans);
8591 		goto out_new_inode_args;
8592 	}
8593 
8594 	err = btrfs_create_new_inode(trans, &new_inode_args);
8595 	if (err)
8596 		goto out;
8597 
8598 	path = btrfs_alloc_path();
8599 	if (!path) {
8600 		err = -ENOMEM;
8601 		btrfs_abort_transaction(trans, err);
8602 		discard_new_inode(inode);
8603 		inode = NULL;
8604 		goto out;
8605 	}
8606 	key.objectid = btrfs_ino(BTRFS_I(inode));
8607 	key.offset = 0;
8608 	key.type = BTRFS_EXTENT_DATA_KEY;
8609 	datasize = btrfs_file_extent_calc_inline_size(name_len);
8610 	err = btrfs_insert_empty_item(trans, root, path, &key,
8611 				      datasize);
8612 	if (err) {
8613 		btrfs_abort_transaction(trans, err);
8614 		btrfs_free_path(path);
8615 		discard_new_inode(inode);
8616 		inode = NULL;
8617 		goto out;
8618 	}
8619 	leaf = path->nodes[0];
8620 	ei = btrfs_item_ptr(leaf, path->slots[0],
8621 			    struct btrfs_file_extent_item);
8622 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
8623 	btrfs_set_file_extent_type(leaf, ei,
8624 				   BTRFS_FILE_EXTENT_INLINE);
8625 	btrfs_set_file_extent_encryption(leaf, ei, 0);
8626 	btrfs_set_file_extent_compression(leaf, ei, 0);
8627 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
8628 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
8629 
8630 	ptr = btrfs_file_extent_inline_start(ei);
8631 	write_extent_buffer(leaf, symname, ptr, name_len);
8632 	btrfs_mark_buffer_dirty(trans, leaf);
8633 	btrfs_free_path(path);
8634 
8635 	d_instantiate_new(dentry, inode);
8636 	err = 0;
8637 out:
8638 	btrfs_end_transaction(trans);
8639 	btrfs_btree_balance_dirty(fs_info);
8640 out_new_inode_args:
8641 	btrfs_new_inode_args_destroy(&new_inode_args);
8642 out_inode:
8643 	if (err)
8644 		iput(inode);
8645 	return err;
8646 }
8647 
8648 static struct btrfs_trans_handle *insert_prealloc_file_extent(
8649 				       struct btrfs_trans_handle *trans_in,
8650 				       struct btrfs_inode *inode,
8651 				       struct btrfs_key *ins,
8652 				       u64 file_offset)
8653 {
8654 	struct btrfs_file_extent_item stack_fi;
8655 	struct btrfs_replace_extent_info extent_info;
8656 	struct btrfs_trans_handle *trans = trans_in;
8657 	struct btrfs_path *path;
8658 	u64 start = ins->objectid;
8659 	u64 len = ins->offset;
8660 	u64 qgroup_released = 0;
8661 	int ret;
8662 
8663 	memset(&stack_fi, 0, sizeof(stack_fi));
8664 
8665 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
8666 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
8667 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
8668 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
8669 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
8670 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
8671 	/* Encryption and other encoding is reserved and all 0 */
8672 
8673 	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
8674 	if (ret < 0)
8675 		return ERR_PTR(ret);
8676 
8677 	if (trans) {
8678 		ret = insert_reserved_file_extent(trans, inode,
8679 						  file_offset, &stack_fi,
8680 						  true, qgroup_released);
8681 		if (ret)
8682 			goto free_qgroup;
8683 		return trans;
8684 	}
8685 
8686 	extent_info.disk_offset = start;
8687 	extent_info.disk_len = len;
8688 	extent_info.data_offset = 0;
8689 	extent_info.data_len = len;
8690 	extent_info.file_offset = file_offset;
8691 	extent_info.extent_buf = (char *)&stack_fi;
8692 	extent_info.is_new_extent = true;
8693 	extent_info.update_times = true;
8694 	extent_info.qgroup_reserved = qgroup_released;
8695 	extent_info.insertions = 0;
8696 
8697 	path = btrfs_alloc_path();
8698 	if (!path) {
8699 		ret = -ENOMEM;
8700 		goto free_qgroup;
8701 	}
8702 
8703 	ret = btrfs_replace_file_extents(inode, path, file_offset,
8704 				     file_offset + len - 1, &extent_info,
8705 				     &trans);
8706 	btrfs_free_path(path);
8707 	if (ret)
8708 		goto free_qgroup;
8709 	return trans;
8710 
8711 free_qgroup:
8712 	/*
8713 	 * We have released qgroup data range at the beginning of the function,
8714 	 * and normally qgroup_released bytes will be freed when committing
8715 	 * transaction.
8716 	 * But if we error out early, we have to free what we have released
8717 	 * or we leak qgroup data reservation.
8718 	 */
8719 	btrfs_qgroup_free_refroot(inode->root->fs_info,
8720 			btrfs_root_id(inode->root), qgroup_released,
8721 			BTRFS_QGROUP_RSV_DATA);
8722 	return ERR_PTR(ret);
8723 }
8724 
8725 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8726 				       u64 start, u64 num_bytes, u64 min_size,
8727 				       loff_t actual_len, u64 *alloc_hint,
8728 				       struct btrfs_trans_handle *trans)
8729 {
8730 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
8731 	struct extent_map *em;
8732 	struct btrfs_root *root = BTRFS_I(inode)->root;
8733 	struct btrfs_key ins;
8734 	u64 cur_offset = start;
8735 	u64 clear_offset = start;
8736 	u64 i_size;
8737 	u64 cur_bytes;
8738 	u64 last_alloc = (u64)-1;
8739 	int ret = 0;
8740 	bool own_trans = true;
8741 	u64 end = start + num_bytes - 1;
8742 
8743 	if (trans)
8744 		own_trans = false;
8745 	while (num_bytes > 0) {
8746 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
8747 		cur_bytes = max(cur_bytes, min_size);
8748 		/*
8749 		 * If we are severely fragmented we could end up with really
8750 		 * small allocations, so if the allocator is returning small
8751 		 * chunks lets make its job easier by only searching for those
8752 		 * sized chunks.
8753 		 */
8754 		cur_bytes = min(cur_bytes, last_alloc);
8755 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
8756 				min_size, 0, *alloc_hint, &ins, 1, 0);
8757 		if (ret)
8758 			break;
8759 
8760 		/*
8761 		 * We've reserved this space, and thus converted it from
8762 		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
8763 		 * from here on out we will only need to clear our reservation
8764 		 * for the remaining unreserved area, so advance our
8765 		 * clear_offset by our extent size.
8766 		 */
8767 		clear_offset += ins.offset;
8768 
8769 		last_alloc = ins.offset;
8770 		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
8771 						    &ins, cur_offset);
8772 		/*
8773 		 * Now that we inserted the prealloc extent we can finally
8774 		 * decrement the number of reservations in the block group.
8775 		 * If we did it before, we could race with relocation and have
8776 		 * relocation miss the reserved extent, making it fail later.
8777 		 */
8778 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
8779 		if (IS_ERR(trans)) {
8780 			ret = PTR_ERR(trans);
8781 			btrfs_free_reserved_extent(fs_info, ins.objectid,
8782 						   ins.offset, 0);
8783 			break;
8784 		}
8785 
8786 		em = alloc_extent_map();
8787 		if (!em) {
8788 			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
8789 					    cur_offset + ins.offset - 1, false);
8790 			btrfs_set_inode_full_sync(BTRFS_I(inode));
8791 			goto next;
8792 		}
8793 
8794 		em->start = cur_offset;
8795 		em->len = ins.offset;
8796 		em->disk_bytenr = ins.objectid;
8797 		em->offset = 0;
8798 		em->disk_num_bytes = ins.offset;
8799 		em->ram_bytes = ins.offset;
8800 		em->flags |= EXTENT_FLAG_PREALLOC;
8801 		em->generation = trans->transid;
8802 
8803 		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
8804 		free_extent_map(em);
8805 next:
8806 		num_bytes -= ins.offset;
8807 		cur_offset += ins.offset;
8808 		*alloc_hint = ins.objectid + ins.offset;
8809 
8810 		inode_inc_iversion(inode);
8811 		inode_set_ctime_current(inode);
8812 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
8813 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
8814 		    (actual_len > inode->i_size) &&
8815 		    (cur_offset > inode->i_size)) {
8816 			if (cur_offset > actual_len)
8817 				i_size = actual_len;
8818 			else
8819 				i_size = cur_offset;
8820 			i_size_write(inode, i_size);
8821 			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
8822 		}
8823 
8824 		ret = btrfs_update_inode(trans, BTRFS_I(inode));
8825 
8826 		if (ret) {
8827 			btrfs_abort_transaction(trans, ret);
8828 			if (own_trans)
8829 				btrfs_end_transaction(trans);
8830 			break;
8831 		}
8832 
8833 		if (own_trans) {
8834 			btrfs_end_transaction(trans);
8835 			trans = NULL;
8836 		}
8837 	}
8838 	if (clear_offset < end)
8839 		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
8840 			end - clear_offset + 1);
8841 	return ret;
8842 }
8843 
8844 int btrfs_prealloc_file_range(struct inode *inode, int mode,
8845 			      u64 start, u64 num_bytes, u64 min_size,
8846 			      loff_t actual_len, u64 *alloc_hint)
8847 {
8848 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8849 					   min_size, actual_len, alloc_hint,
8850 					   NULL);
8851 }
8852 
8853 int btrfs_prealloc_file_range_trans(struct inode *inode,
8854 				    struct btrfs_trans_handle *trans, int mode,
8855 				    u64 start, u64 num_bytes, u64 min_size,
8856 				    loff_t actual_len, u64 *alloc_hint)
8857 {
8858 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8859 					   min_size, actual_len, alloc_hint, trans);
8860 }
8861 
8862 static int btrfs_permission(struct mnt_idmap *idmap,
8863 			    struct inode *inode, int mask)
8864 {
8865 	struct btrfs_root *root = BTRFS_I(inode)->root;
8866 	umode_t mode = inode->i_mode;
8867 
8868 	if (mask & MAY_WRITE &&
8869 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
8870 		if (btrfs_root_readonly(root))
8871 			return -EROFS;
8872 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
8873 			return -EACCES;
8874 	}
8875 	return generic_permission(idmap, inode, mask);
8876 }
8877 
8878 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
8879 			 struct file *file, umode_t mode)
8880 {
8881 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
8882 	struct btrfs_trans_handle *trans;
8883 	struct btrfs_root *root = BTRFS_I(dir)->root;
8884 	struct inode *inode;
8885 	struct btrfs_new_inode_args new_inode_args = {
8886 		.dir = dir,
8887 		.dentry = file->f_path.dentry,
8888 		.orphan = true,
8889 	};
8890 	unsigned int trans_num_items;
8891 	int ret;
8892 
8893 	inode = new_inode(dir->i_sb);
8894 	if (!inode)
8895 		return -ENOMEM;
8896 	inode_init_owner(idmap, inode, dir, mode);
8897 	inode->i_fop = &btrfs_file_operations;
8898 	inode->i_op = &btrfs_file_inode_operations;
8899 	inode->i_mapping->a_ops = &btrfs_aops;
8900 
8901 	new_inode_args.inode = inode;
8902 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
8903 	if (ret)
8904 		goto out_inode;
8905 
8906 	trans = btrfs_start_transaction(root, trans_num_items);
8907 	if (IS_ERR(trans)) {
8908 		ret = PTR_ERR(trans);
8909 		goto out_new_inode_args;
8910 	}
8911 
8912 	ret = btrfs_create_new_inode(trans, &new_inode_args);
8913 
8914 	/*
8915 	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
8916 	 * set it to 1 because d_tmpfile() will issue a warning if the count is
8917 	 * 0, through:
8918 	 *
8919 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
8920 	 */
8921 	set_nlink(inode, 1);
8922 
8923 	if (!ret) {
8924 		d_tmpfile(file, inode);
8925 		unlock_new_inode(inode);
8926 		mark_inode_dirty(inode);
8927 	}
8928 
8929 	btrfs_end_transaction(trans);
8930 	btrfs_btree_balance_dirty(fs_info);
8931 out_new_inode_args:
8932 	btrfs_new_inode_args_destroy(&new_inode_args);
8933 out_inode:
8934 	if (ret)
8935 		iput(inode);
8936 	return finish_open_simple(file, ret);
8937 }
8938 
8939 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
8940 {
8941 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
8942 	unsigned long index = start >> PAGE_SHIFT;
8943 	unsigned long end_index = end >> PAGE_SHIFT;
8944 	struct page *page;
8945 	u32 len;
8946 
8947 	ASSERT(end + 1 - start <= U32_MAX);
8948 	len = end + 1 - start;
8949 	while (index <= end_index) {
8950 		page = find_get_page(inode->vfs_inode.i_mapping, index);
8951 		ASSERT(page); /* Pages should be in the extent_io_tree */
8952 
8953 		/* This is for data, which doesn't yet support larger folio. */
8954 		ASSERT(folio_order(page_folio(page)) == 0);
8955 		btrfs_folio_set_writeback(fs_info, page_folio(page), start, len);
8956 		put_page(page);
8957 		index++;
8958 	}
8959 }
8960 
8961 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
8962 					     int compress_type)
8963 {
8964 	switch (compress_type) {
8965 	case BTRFS_COMPRESS_NONE:
8966 		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
8967 	case BTRFS_COMPRESS_ZLIB:
8968 		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
8969 	case BTRFS_COMPRESS_LZO:
8970 		/*
8971 		 * The LZO format depends on the sector size. 64K is the maximum
8972 		 * sector size that we support.
8973 		 */
8974 		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
8975 			return -EINVAL;
8976 		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
8977 		       (fs_info->sectorsize_bits - 12);
8978 	case BTRFS_COMPRESS_ZSTD:
8979 		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
8980 	default:
8981 		return -EUCLEAN;
8982 	}
8983 }
8984 
8985 static ssize_t btrfs_encoded_read_inline(
8986 				struct kiocb *iocb,
8987 				struct iov_iter *iter, u64 start,
8988 				u64 lockend,
8989 				struct extent_state **cached_state,
8990 				u64 extent_start, size_t count,
8991 				struct btrfs_ioctl_encoded_io_args *encoded,
8992 				bool *unlocked)
8993 {
8994 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
8995 	struct btrfs_root *root = inode->root;
8996 	struct btrfs_fs_info *fs_info = root->fs_info;
8997 	struct extent_io_tree *io_tree = &inode->io_tree;
8998 	struct btrfs_path *path;
8999 	struct extent_buffer *leaf;
9000 	struct btrfs_file_extent_item *item;
9001 	u64 ram_bytes;
9002 	unsigned long ptr;
9003 	void *tmp;
9004 	ssize_t ret;
9005 
9006 	path = btrfs_alloc_path();
9007 	if (!path) {
9008 		ret = -ENOMEM;
9009 		goto out;
9010 	}
9011 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9012 				       extent_start, 0);
9013 	if (ret) {
9014 		if (ret > 0) {
9015 			/* The extent item disappeared? */
9016 			ret = -EIO;
9017 		}
9018 		goto out;
9019 	}
9020 	leaf = path->nodes[0];
9021 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9022 
9023 	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9024 	ptr = btrfs_file_extent_inline_start(item);
9025 
9026 	encoded->len = min_t(u64, extent_start + ram_bytes,
9027 			     inode->vfs_inode.i_size) - iocb->ki_pos;
9028 	ret = btrfs_encoded_io_compression_from_extent(fs_info,
9029 				 btrfs_file_extent_compression(leaf, item));
9030 	if (ret < 0)
9031 		goto out;
9032 	encoded->compression = ret;
9033 	if (encoded->compression) {
9034 		size_t inline_size;
9035 
9036 		inline_size = btrfs_file_extent_inline_item_len(leaf,
9037 								path->slots[0]);
9038 		if (inline_size > count) {
9039 			ret = -ENOBUFS;
9040 			goto out;
9041 		}
9042 		count = inline_size;
9043 		encoded->unencoded_len = ram_bytes;
9044 		encoded->unencoded_offset = iocb->ki_pos - extent_start;
9045 	} else {
9046 		count = min_t(u64, count, encoded->len);
9047 		encoded->len = count;
9048 		encoded->unencoded_len = count;
9049 		ptr += iocb->ki_pos - extent_start;
9050 	}
9051 
9052 	tmp = kmalloc(count, GFP_NOFS);
9053 	if (!tmp) {
9054 		ret = -ENOMEM;
9055 		goto out;
9056 	}
9057 	read_extent_buffer(leaf, tmp, ptr, count);
9058 	btrfs_release_path(path);
9059 	unlock_extent(io_tree, start, lockend, cached_state);
9060 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9061 	*unlocked = true;
9062 
9063 	ret = copy_to_iter(tmp, count, iter);
9064 	if (ret != count)
9065 		ret = -EFAULT;
9066 	kfree(tmp);
9067 out:
9068 	btrfs_free_path(path);
9069 	return ret;
9070 }
9071 
9072 struct btrfs_encoded_read_private {
9073 	wait_queue_head_t wait;
9074 	atomic_t pending;
9075 	blk_status_t status;
9076 };
9077 
9078 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
9079 {
9080 	struct btrfs_encoded_read_private *priv = bbio->private;
9081 
9082 	if (bbio->bio.bi_status) {
9083 		/*
9084 		 * The memory barrier implied by the atomic_dec_return() here
9085 		 * pairs with the memory barrier implied by the
9086 		 * atomic_dec_return() or io_wait_event() in
9087 		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
9088 		 * write is observed before the load of status in
9089 		 * btrfs_encoded_read_regular_fill_pages().
9090 		 */
9091 		WRITE_ONCE(priv->status, bbio->bio.bi_status);
9092 	}
9093 	if (!atomic_dec_return(&priv->pending))
9094 		wake_up(&priv->wait);
9095 	bio_put(&bbio->bio);
9096 }
9097 
9098 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
9099 					  u64 file_offset, u64 disk_bytenr,
9100 					  u64 disk_io_size, struct page **pages)
9101 {
9102 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
9103 	struct btrfs_encoded_read_private priv = {
9104 		.pending = ATOMIC_INIT(1),
9105 	};
9106 	unsigned long i = 0;
9107 	struct btrfs_bio *bbio;
9108 
9109 	init_waitqueue_head(&priv.wait);
9110 
9111 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
9112 			       btrfs_encoded_read_endio, &priv);
9113 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9114 	bbio->inode = inode;
9115 
9116 	do {
9117 		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
9118 
9119 		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
9120 			atomic_inc(&priv.pending);
9121 			btrfs_submit_bio(bbio, 0);
9122 
9123 			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
9124 					       btrfs_encoded_read_endio, &priv);
9125 			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9126 			bbio->inode = inode;
9127 			continue;
9128 		}
9129 
9130 		i++;
9131 		disk_bytenr += bytes;
9132 		disk_io_size -= bytes;
9133 	} while (disk_io_size);
9134 
9135 	atomic_inc(&priv.pending);
9136 	btrfs_submit_bio(bbio, 0);
9137 
9138 	if (atomic_dec_return(&priv.pending))
9139 		io_wait_event(priv.wait, !atomic_read(&priv.pending));
9140 	/* See btrfs_encoded_read_endio() for ordering. */
9141 	return blk_status_to_errno(READ_ONCE(priv.status));
9142 }
9143 
9144 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
9145 					  struct iov_iter *iter,
9146 					  u64 start, u64 lockend,
9147 					  struct extent_state **cached_state,
9148 					  u64 disk_bytenr, u64 disk_io_size,
9149 					  size_t count, bool compressed,
9150 					  bool *unlocked)
9151 {
9152 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9153 	struct extent_io_tree *io_tree = &inode->io_tree;
9154 	struct page **pages;
9155 	unsigned long nr_pages, i;
9156 	u64 cur;
9157 	size_t page_offset;
9158 	ssize_t ret;
9159 
9160 	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
9161 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
9162 	if (!pages)
9163 		return -ENOMEM;
9164 	ret = btrfs_alloc_page_array(nr_pages, pages, false);
9165 	if (ret) {
9166 		ret = -ENOMEM;
9167 		goto out;
9168 		}
9169 
9170 	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
9171 						    disk_io_size, pages);
9172 	if (ret)
9173 		goto out;
9174 
9175 	unlock_extent(io_tree, start, lockend, cached_state);
9176 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9177 	*unlocked = true;
9178 
9179 	if (compressed) {
9180 		i = 0;
9181 		page_offset = 0;
9182 	} else {
9183 		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
9184 		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
9185 	}
9186 	cur = 0;
9187 	while (cur < count) {
9188 		size_t bytes = min_t(size_t, count - cur,
9189 				     PAGE_SIZE - page_offset);
9190 
9191 		if (copy_page_to_iter(pages[i], page_offset, bytes,
9192 				      iter) != bytes) {
9193 			ret = -EFAULT;
9194 			goto out;
9195 		}
9196 		i++;
9197 		cur += bytes;
9198 		page_offset = 0;
9199 	}
9200 	ret = count;
9201 out:
9202 	for (i = 0; i < nr_pages; i++) {
9203 		if (pages[i])
9204 			__free_page(pages[i]);
9205 	}
9206 	kfree(pages);
9207 	return ret;
9208 }
9209 
9210 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
9211 			   struct btrfs_ioctl_encoded_io_args *encoded)
9212 {
9213 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9214 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
9215 	struct extent_io_tree *io_tree = &inode->io_tree;
9216 	ssize_t ret;
9217 	size_t count = iov_iter_count(iter);
9218 	u64 start, lockend, disk_bytenr, disk_io_size;
9219 	struct extent_state *cached_state = NULL;
9220 	struct extent_map *em;
9221 	bool unlocked = false;
9222 
9223 	file_accessed(iocb->ki_filp);
9224 
9225 	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
9226 
9227 	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
9228 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9229 		return 0;
9230 	}
9231 	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
9232 	/*
9233 	 * We don't know how long the extent containing iocb->ki_pos is, but if
9234 	 * it's compressed we know that it won't be longer than this.
9235 	 */
9236 	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
9237 
9238 	for (;;) {
9239 		struct btrfs_ordered_extent *ordered;
9240 
9241 		ret = btrfs_wait_ordered_range(inode, start,
9242 					       lockend - start + 1);
9243 		if (ret)
9244 			goto out_unlock_inode;
9245 		lock_extent(io_tree, start, lockend, &cached_state);
9246 		ordered = btrfs_lookup_ordered_range(inode, start,
9247 						     lockend - start + 1);
9248 		if (!ordered)
9249 			break;
9250 		btrfs_put_ordered_extent(ordered);
9251 		unlock_extent(io_tree, start, lockend, &cached_state);
9252 		cond_resched();
9253 	}
9254 
9255 	em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
9256 	if (IS_ERR(em)) {
9257 		ret = PTR_ERR(em);
9258 		goto out_unlock_extent;
9259 	}
9260 
9261 	if (em->disk_bytenr == EXTENT_MAP_INLINE) {
9262 		u64 extent_start = em->start;
9263 
9264 		/*
9265 		 * For inline extents we get everything we need out of the
9266 		 * extent item.
9267 		 */
9268 		free_extent_map(em);
9269 		em = NULL;
9270 		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
9271 						&cached_state, extent_start,
9272 						count, encoded, &unlocked);
9273 		goto out;
9274 	}
9275 
9276 	/*
9277 	 * We only want to return up to EOF even if the extent extends beyond
9278 	 * that.
9279 	 */
9280 	encoded->len = min_t(u64, extent_map_end(em),
9281 			     inode->vfs_inode.i_size) - iocb->ki_pos;
9282 	if (em->disk_bytenr == EXTENT_MAP_HOLE ||
9283 	    (em->flags & EXTENT_FLAG_PREALLOC)) {
9284 		disk_bytenr = EXTENT_MAP_HOLE;
9285 		count = min_t(u64, count, encoded->len);
9286 		encoded->len = count;
9287 		encoded->unencoded_len = count;
9288 	} else if (extent_map_is_compressed(em)) {
9289 		disk_bytenr = em->disk_bytenr;
9290 		/*
9291 		 * Bail if the buffer isn't large enough to return the whole
9292 		 * compressed extent.
9293 		 */
9294 		if (em->disk_num_bytes > count) {
9295 			ret = -ENOBUFS;
9296 			goto out_em;
9297 		}
9298 		disk_io_size = em->disk_num_bytes;
9299 		count = em->disk_num_bytes;
9300 		encoded->unencoded_len = em->ram_bytes;
9301 		encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
9302 		ret = btrfs_encoded_io_compression_from_extent(fs_info,
9303 							       extent_map_compression(em));
9304 		if (ret < 0)
9305 			goto out_em;
9306 		encoded->compression = ret;
9307 	} else {
9308 		disk_bytenr = extent_map_block_start(em) + (start - em->start);
9309 		if (encoded->len > count)
9310 			encoded->len = count;
9311 		/*
9312 		 * Don't read beyond what we locked. This also limits the page
9313 		 * allocations that we'll do.
9314 		 */
9315 		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
9316 		count = start + disk_io_size - iocb->ki_pos;
9317 		encoded->len = count;
9318 		encoded->unencoded_len = count;
9319 		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
9320 	}
9321 	free_extent_map(em);
9322 	em = NULL;
9323 
9324 	if (disk_bytenr == EXTENT_MAP_HOLE) {
9325 		unlock_extent(io_tree, start, lockend, &cached_state);
9326 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9327 		unlocked = true;
9328 		ret = iov_iter_zero(count, iter);
9329 		if (ret != count)
9330 			ret = -EFAULT;
9331 	} else {
9332 		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
9333 						 &cached_state, disk_bytenr,
9334 						 disk_io_size, count,
9335 						 encoded->compression,
9336 						 &unlocked);
9337 	}
9338 
9339 out:
9340 	if (ret >= 0)
9341 		iocb->ki_pos += encoded->len;
9342 out_em:
9343 	free_extent_map(em);
9344 out_unlock_extent:
9345 	if (!unlocked)
9346 		unlock_extent(io_tree, start, lockend, &cached_state);
9347 out_unlock_inode:
9348 	if (!unlocked)
9349 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9350 	return ret;
9351 }
9352 
9353 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
9354 			       const struct btrfs_ioctl_encoded_io_args *encoded)
9355 {
9356 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9357 	struct btrfs_root *root = inode->root;
9358 	struct btrfs_fs_info *fs_info = root->fs_info;
9359 	struct extent_io_tree *io_tree = &inode->io_tree;
9360 	struct extent_changeset *data_reserved = NULL;
9361 	struct extent_state *cached_state = NULL;
9362 	struct btrfs_ordered_extent *ordered;
9363 	struct btrfs_file_extent file_extent;
9364 	int compression;
9365 	size_t orig_count;
9366 	u64 start, end;
9367 	u64 num_bytes, ram_bytes, disk_num_bytes;
9368 	unsigned long nr_folios, i;
9369 	struct folio **folios;
9370 	struct btrfs_key ins;
9371 	bool extent_reserved = false;
9372 	struct extent_map *em;
9373 	ssize_t ret;
9374 
9375 	switch (encoded->compression) {
9376 	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
9377 		compression = BTRFS_COMPRESS_ZLIB;
9378 		break;
9379 	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
9380 		compression = BTRFS_COMPRESS_ZSTD;
9381 		break;
9382 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
9383 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
9384 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
9385 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
9386 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
9387 		/* The sector size must match for LZO. */
9388 		if (encoded->compression -
9389 		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
9390 		    fs_info->sectorsize_bits)
9391 			return -EINVAL;
9392 		compression = BTRFS_COMPRESS_LZO;
9393 		break;
9394 	default:
9395 		return -EINVAL;
9396 	}
9397 	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
9398 		return -EINVAL;
9399 
9400 	/*
9401 	 * Compressed extents should always have checksums, so error out if we
9402 	 * have a NOCOW file or inode was created while mounted with NODATASUM.
9403 	 */
9404 	if (inode->flags & BTRFS_INODE_NODATASUM)
9405 		return -EINVAL;
9406 
9407 	orig_count = iov_iter_count(from);
9408 
9409 	/* The extent size must be sane. */
9410 	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
9411 	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
9412 		return -EINVAL;
9413 
9414 	/*
9415 	 * The compressed data must be smaller than the decompressed data.
9416 	 *
9417 	 * It's of course possible for data to compress to larger or the same
9418 	 * size, but the buffered I/O path falls back to no compression for such
9419 	 * data, and we don't want to break any assumptions by creating these
9420 	 * extents.
9421 	 *
9422 	 * Note that this is less strict than the current check we have that the
9423 	 * compressed data must be at least one sector smaller than the
9424 	 * decompressed data. We only want to enforce the weaker requirement
9425 	 * from old kernels that it is at least one byte smaller.
9426 	 */
9427 	if (orig_count >= encoded->unencoded_len)
9428 		return -EINVAL;
9429 
9430 	/* The extent must start on a sector boundary. */
9431 	start = iocb->ki_pos;
9432 	if (!IS_ALIGNED(start, fs_info->sectorsize))
9433 		return -EINVAL;
9434 
9435 	/*
9436 	 * The extent must end on a sector boundary. However, we allow a write
9437 	 * which ends at or extends i_size to have an unaligned length; we round
9438 	 * up the extent size and set i_size to the unaligned end.
9439 	 */
9440 	if (start + encoded->len < inode->vfs_inode.i_size &&
9441 	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
9442 		return -EINVAL;
9443 
9444 	/* Finally, the offset in the unencoded data must be sector-aligned. */
9445 	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
9446 		return -EINVAL;
9447 
9448 	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
9449 	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
9450 	end = start + num_bytes - 1;
9451 
9452 	/*
9453 	 * If the extent cannot be inline, the compressed data on disk must be
9454 	 * sector-aligned. For convenience, we extend it with zeroes if it
9455 	 * isn't.
9456 	 */
9457 	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
9458 	nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
9459 	folios = kvcalloc(nr_folios, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
9460 	if (!folios)
9461 		return -ENOMEM;
9462 	for (i = 0; i < nr_folios; i++) {
9463 		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
9464 		char *kaddr;
9465 
9466 		folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
9467 		if (!folios[i]) {
9468 			ret = -ENOMEM;
9469 			goto out_folios;
9470 		}
9471 		kaddr = kmap_local_folio(folios[i], 0);
9472 		if (copy_from_iter(kaddr, bytes, from) != bytes) {
9473 			kunmap_local(kaddr);
9474 			ret = -EFAULT;
9475 			goto out_folios;
9476 		}
9477 		if (bytes < PAGE_SIZE)
9478 			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
9479 		kunmap_local(kaddr);
9480 	}
9481 
9482 	for (;;) {
9483 		struct btrfs_ordered_extent *ordered;
9484 
9485 		ret = btrfs_wait_ordered_range(inode, start, num_bytes);
9486 		if (ret)
9487 			goto out_folios;
9488 		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
9489 						    start >> PAGE_SHIFT,
9490 						    end >> PAGE_SHIFT);
9491 		if (ret)
9492 			goto out_folios;
9493 		lock_extent(io_tree, start, end, &cached_state);
9494 		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
9495 		if (!ordered &&
9496 		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
9497 			break;
9498 		if (ordered)
9499 			btrfs_put_ordered_extent(ordered);
9500 		unlock_extent(io_tree, start, end, &cached_state);
9501 		cond_resched();
9502 	}
9503 
9504 	/*
9505 	 * We don't use the higher-level delalloc space functions because our
9506 	 * num_bytes and disk_num_bytes are different.
9507 	 */
9508 	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
9509 	if (ret)
9510 		goto out_unlock;
9511 	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
9512 	if (ret)
9513 		goto out_free_data_space;
9514 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
9515 					      false);
9516 	if (ret)
9517 		goto out_qgroup_free_data;
9518 
9519 	/* Try an inline extent first. */
9520 	if (encoded->unencoded_len == encoded->len &&
9521 	    encoded->unencoded_offset == 0 &&
9522 	    can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
9523 		ret = __cow_file_range_inline(inode, start, encoded->len,
9524 					      orig_count, compression, folios[0],
9525 					      true);
9526 		if (ret <= 0) {
9527 			if (ret == 0)
9528 				ret = orig_count;
9529 			goto out_delalloc_release;
9530 		}
9531 	}
9532 
9533 	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
9534 				   disk_num_bytes, 0, 0, &ins, 1, 1);
9535 	if (ret)
9536 		goto out_delalloc_release;
9537 	extent_reserved = true;
9538 
9539 	file_extent.disk_bytenr = ins.objectid;
9540 	file_extent.disk_num_bytes = ins.offset;
9541 	file_extent.num_bytes = num_bytes;
9542 	file_extent.ram_bytes = ram_bytes;
9543 	file_extent.offset = encoded->unencoded_offset;
9544 	file_extent.compression = compression;
9545 	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
9546 	if (IS_ERR(em)) {
9547 		ret = PTR_ERR(em);
9548 		goto out_free_reserved;
9549 	}
9550 	free_extent_map(em);
9551 
9552 	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
9553 				       (1 << BTRFS_ORDERED_ENCODED) |
9554 				       (1 << BTRFS_ORDERED_COMPRESSED));
9555 	if (IS_ERR(ordered)) {
9556 		btrfs_drop_extent_map_range(inode, start, end, false);
9557 		ret = PTR_ERR(ordered);
9558 		goto out_free_reserved;
9559 	}
9560 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9561 
9562 	if (start + encoded->len > inode->vfs_inode.i_size)
9563 		i_size_write(&inode->vfs_inode, start + encoded->len);
9564 
9565 	unlock_extent(io_tree, start, end, &cached_state);
9566 
9567 	btrfs_delalloc_release_extents(inode, num_bytes);
9568 
9569 	btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
9570 	ret = orig_count;
9571 	goto out;
9572 
9573 out_free_reserved:
9574 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9575 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
9576 out_delalloc_release:
9577 	btrfs_delalloc_release_extents(inode, num_bytes);
9578 	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
9579 out_qgroup_free_data:
9580 	if (ret < 0)
9581 		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
9582 out_free_data_space:
9583 	/*
9584 	 * If btrfs_reserve_extent() succeeded, then we already decremented
9585 	 * bytes_may_use.
9586 	 */
9587 	if (!extent_reserved)
9588 		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
9589 out_unlock:
9590 	unlock_extent(io_tree, start, end, &cached_state);
9591 out_folios:
9592 	for (i = 0; i < nr_folios; i++) {
9593 		if (folios[i])
9594 			folio_put(folios[i]);
9595 	}
9596 	kvfree(folios);
9597 out:
9598 	if (ret >= 0)
9599 		iocb->ki_pos += encoded->len;
9600 	return ret;
9601 }
9602 
9603 #ifdef CONFIG_SWAP
9604 /*
9605  * Add an entry indicating a block group or device which is pinned by a
9606  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
9607  * negative errno on failure.
9608  */
9609 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
9610 				  bool is_block_group)
9611 {
9612 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
9613 	struct btrfs_swapfile_pin *sp, *entry;
9614 	struct rb_node **p;
9615 	struct rb_node *parent = NULL;
9616 
9617 	sp = kmalloc(sizeof(*sp), GFP_NOFS);
9618 	if (!sp)
9619 		return -ENOMEM;
9620 	sp->ptr = ptr;
9621 	sp->inode = inode;
9622 	sp->is_block_group = is_block_group;
9623 	sp->bg_extent_count = 1;
9624 
9625 	spin_lock(&fs_info->swapfile_pins_lock);
9626 	p = &fs_info->swapfile_pins.rb_node;
9627 	while (*p) {
9628 		parent = *p;
9629 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
9630 		if (sp->ptr < entry->ptr ||
9631 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
9632 			p = &(*p)->rb_left;
9633 		} else if (sp->ptr > entry->ptr ||
9634 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
9635 			p = &(*p)->rb_right;
9636 		} else {
9637 			if (is_block_group)
9638 				entry->bg_extent_count++;
9639 			spin_unlock(&fs_info->swapfile_pins_lock);
9640 			kfree(sp);
9641 			return 1;
9642 		}
9643 	}
9644 	rb_link_node(&sp->node, parent, p);
9645 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
9646 	spin_unlock(&fs_info->swapfile_pins_lock);
9647 	return 0;
9648 }
9649 
9650 /* Free all of the entries pinned by this swapfile. */
9651 static void btrfs_free_swapfile_pins(struct inode *inode)
9652 {
9653 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
9654 	struct btrfs_swapfile_pin *sp;
9655 	struct rb_node *node, *next;
9656 
9657 	spin_lock(&fs_info->swapfile_pins_lock);
9658 	node = rb_first(&fs_info->swapfile_pins);
9659 	while (node) {
9660 		next = rb_next(node);
9661 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
9662 		if (sp->inode == inode) {
9663 			rb_erase(&sp->node, &fs_info->swapfile_pins);
9664 			if (sp->is_block_group) {
9665 				btrfs_dec_block_group_swap_extents(sp->ptr,
9666 							   sp->bg_extent_count);
9667 				btrfs_put_block_group(sp->ptr);
9668 			}
9669 			kfree(sp);
9670 		}
9671 		node = next;
9672 	}
9673 	spin_unlock(&fs_info->swapfile_pins_lock);
9674 }
9675 
9676 struct btrfs_swap_info {
9677 	u64 start;
9678 	u64 block_start;
9679 	u64 block_len;
9680 	u64 lowest_ppage;
9681 	u64 highest_ppage;
9682 	unsigned long nr_pages;
9683 	int nr_extents;
9684 };
9685 
9686 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
9687 				 struct btrfs_swap_info *bsi)
9688 {
9689 	unsigned long nr_pages;
9690 	unsigned long max_pages;
9691 	u64 first_ppage, first_ppage_reported, next_ppage;
9692 	int ret;
9693 
9694 	/*
9695 	 * Our swapfile may have had its size extended after the swap header was
9696 	 * written. In that case activating the swapfile should not go beyond
9697 	 * the max size set in the swap header.
9698 	 */
9699 	if (bsi->nr_pages >= sis->max)
9700 		return 0;
9701 
9702 	max_pages = sis->max - bsi->nr_pages;
9703 	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
9704 	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
9705 
9706 	if (first_ppage >= next_ppage)
9707 		return 0;
9708 	nr_pages = next_ppage - first_ppage;
9709 	nr_pages = min(nr_pages, max_pages);
9710 
9711 	first_ppage_reported = first_ppage;
9712 	if (bsi->start == 0)
9713 		first_ppage_reported++;
9714 	if (bsi->lowest_ppage > first_ppage_reported)
9715 		bsi->lowest_ppage = first_ppage_reported;
9716 	if (bsi->highest_ppage < (next_ppage - 1))
9717 		bsi->highest_ppage = next_ppage - 1;
9718 
9719 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
9720 	if (ret < 0)
9721 		return ret;
9722 	bsi->nr_extents += ret;
9723 	bsi->nr_pages += nr_pages;
9724 	return 0;
9725 }
9726 
9727 static void btrfs_swap_deactivate(struct file *file)
9728 {
9729 	struct inode *inode = file_inode(file);
9730 
9731 	btrfs_free_swapfile_pins(inode);
9732 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
9733 }
9734 
9735 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
9736 			       sector_t *span)
9737 {
9738 	struct inode *inode = file_inode(file);
9739 	struct btrfs_root *root = BTRFS_I(inode)->root;
9740 	struct btrfs_fs_info *fs_info = root->fs_info;
9741 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
9742 	struct extent_state *cached_state = NULL;
9743 	struct extent_map *em = NULL;
9744 	struct btrfs_chunk_map *map = NULL;
9745 	struct btrfs_device *device = NULL;
9746 	struct btrfs_swap_info bsi = {
9747 		.lowest_ppage = (sector_t)-1ULL,
9748 	};
9749 	int ret = 0;
9750 	u64 isize;
9751 	u64 start;
9752 
9753 	/*
9754 	 * If the swap file was just created, make sure delalloc is done. If the
9755 	 * file changes again after this, the user is doing something stupid and
9756 	 * we don't really care.
9757 	 */
9758 	ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
9759 	if (ret)
9760 		return ret;
9761 
9762 	/*
9763 	 * The inode is locked, so these flags won't change after we check them.
9764 	 */
9765 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
9766 		btrfs_warn(fs_info, "swapfile must not be compressed");
9767 		return -EINVAL;
9768 	}
9769 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
9770 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
9771 		return -EINVAL;
9772 	}
9773 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
9774 		btrfs_warn(fs_info, "swapfile must not be checksummed");
9775 		return -EINVAL;
9776 	}
9777 
9778 	/*
9779 	 * Balance or device remove/replace/resize can move stuff around from
9780 	 * under us. The exclop protection makes sure they aren't running/won't
9781 	 * run concurrently while we are mapping the swap extents, and
9782 	 * fs_info->swapfile_pins prevents them from running while the swap
9783 	 * file is active and moving the extents. Note that this also prevents
9784 	 * a concurrent device add which isn't actually necessary, but it's not
9785 	 * really worth the trouble to allow it.
9786 	 */
9787 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
9788 		btrfs_warn(fs_info,
9789 	   "cannot activate swapfile while exclusive operation is running");
9790 		return -EBUSY;
9791 	}
9792 
9793 	/*
9794 	 * Prevent snapshot creation while we are activating the swap file.
9795 	 * We do not want to race with snapshot creation. If snapshot creation
9796 	 * already started before we bumped nr_swapfiles from 0 to 1 and
9797 	 * completes before the first write into the swap file after it is
9798 	 * activated, than that write would fallback to COW.
9799 	 */
9800 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
9801 		btrfs_exclop_finish(fs_info);
9802 		btrfs_warn(fs_info,
9803 	   "cannot activate swapfile because snapshot creation is in progress");
9804 		return -EINVAL;
9805 	}
9806 	/*
9807 	 * Snapshots can create extents which require COW even if NODATACOW is
9808 	 * set. We use this counter to prevent snapshots. We must increment it
9809 	 * before walking the extents because we don't want a concurrent
9810 	 * snapshot to run after we've already checked the extents.
9811 	 *
9812 	 * It is possible that subvolume is marked for deletion but still not
9813 	 * removed yet. To prevent this race, we check the root status before
9814 	 * activating the swapfile.
9815 	 */
9816 	spin_lock(&root->root_item_lock);
9817 	if (btrfs_root_dead(root)) {
9818 		spin_unlock(&root->root_item_lock);
9819 
9820 		btrfs_exclop_finish(fs_info);
9821 		btrfs_warn(fs_info,
9822 		"cannot activate swapfile because subvolume %llu is being deleted",
9823 			btrfs_root_id(root));
9824 		return -EPERM;
9825 	}
9826 	atomic_inc(&root->nr_swapfiles);
9827 	spin_unlock(&root->root_item_lock);
9828 
9829 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
9830 
9831 	lock_extent(io_tree, 0, isize - 1, &cached_state);
9832 	start = 0;
9833 	while (start < isize) {
9834 		u64 logical_block_start, physical_block_start;
9835 		struct btrfs_block_group *bg;
9836 		u64 len = isize - start;
9837 
9838 		em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
9839 		if (IS_ERR(em)) {
9840 			ret = PTR_ERR(em);
9841 			goto out;
9842 		}
9843 
9844 		if (em->disk_bytenr == EXTENT_MAP_HOLE) {
9845 			btrfs_warn(fs_info, "swapfile must not have holes");
9846 			ret = -EINVAL;
9847 			goto out;
9848 		}
9849 		if (em->disk_bytenr == EXTENT_MAP_INLINE) {
9850 			/*
9851 			 * It's unlikely we'll ever actually find ourselves
9852 			 * here, as a file small enough to fit inline won't be
9853 			 * big enough to store more than the swap header, but in
9854 			 * case something changes in the future, let's catch it
9855 			 * here rather than later.
9856 			 */
9857 			btrfs_warn(fs_info, "swapfile must not be inline");
9858 			ret = -EINVAL;
9859 			goto out;
9860 		}
9861 		if (extent_map_is_compressed(em)) {
9862 			btrfs_warn(fs_info, "swapfile must not be compressed");
9863 			ret = -EINVAL;
9864 			goto out;
9865 		}
9866 
9867 		logical_block_start = extent_map_block_start(em) + (start - em->start);
9868 		len = min(len, em->len - (start - em->start));
9869 		free_extent_map(em);
9870 		em = NULL;
9871 
9872 		ret = can_nocow_extent(inode, start, &len, NULL, false, true);
9873 		if (ret < 0) {
9874 			goto out;
9875 		} else if (ret) {
9876 			ret = 0;
9877 		} else {
9878 			btrfs_warn(fs_info,
9879 				   "swapfile must not be copy-on-write");
9880 			ret = -EINVAL;
9881 			goto out;
9882 		}
9883 
9884 		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
9885 		if (IS_ERR(map)) {
9886 			ret = PTR_ERR(map);
9887 			goto out;
9888 		}
9889 
9890 		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
9891 			btrfs_warn(fs_info,
9892 				   "swapfile must have single data profile");
9893 			ret = -EINVAL;
9894 			goto out;
9895 		}
9896 
9897 		if (device == NULL) {
9898 			device = map->stripes[0].dev;
9899 			ret = btrfs_add_swapfile_pin(inode, device, false);
9900 			if (ret == 1)
9901 				ret = 0;
9902 			else if (ret)
9903 				goto out;
9904 		} else if (device != map->stripes[0].dev) {
9905 			btrfs_warn(fs_info, "swapfile must be on one device");
9906 			ret = -EINVAL;
9907 			goto out;
9908 		}
9909 
9910 		physical_block_start = (map->stripes[0].physical +
9911 					(logical_block_start - map->start));
9912 		len = min(len, map->chunk_len - (logical_block_start - map->start));
9913 		btrfs_free_chunk_map(map);
9914 		map = NULL;
9915 
9916 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
9917 		if (!bg) {
9918 			btrfs_warn(fs_info,
9919 			   "could not find block group containing swapfile");
9920 			ret = -EINVAL;
9921 			goto out;
9922 		}
9923 
9924 		if (!btrfs_inc_block_group_swap_extents(bg)) {
9925 			btrfs_warn(fs_info,
9926 			   "block group for swapfile at %llu is read-only%s",
9927 			   bg->start,
9928 			   atomic_read(&fs_info->scrubs_running) ?
9929 				       " (scrub running)" : "");
9930 			btrfs_put_block_group(bg);
9931 			ret = -EINVAL;
9932 			goto out;
9933 		}
9934 
9935 		ret = btrfs_add_swapfile_pin(inode, bg, true);
9936 		if (ret) {
9937 			btrfs_put_block_group(bg);
9938 			if (ret == 1)
9939 				ret = 0;
9940 			else
9941 				goto out;
9942 		}
9943 
9944 		if (bsi.block_len &&
9945 		    bsi.block_start + bsi.block_len == physical_block_start) {
9946 			bsi.block_len += len;
9947 		} else {
9948 			if (bsi.block_len) {
9949 				ret = btrfs_add_swap_extent(sis, &bsi);
9950 				if (ret)
9951 					goto out;
9952 			}
9953 			bsi.start = start;
9954 			bsi.block_start = physical_block_start;
9955 			bsi.block_len = len;
9956 		}
9957 
9958 		start += len;
9959 	}
9960 
9961 	if (bsi.block_len)
9962 		ret = btrfs_add_swap_extent(sis, &bsi);
9963 
9964 out:
9965 	if (!IS_ERR_OR_NULL(em))
9966 		free_extent_map(em);
9967 	if (!IS_ERR_OR_NULL(map))
9968 		btrfs_free_chunk_map(map);
9969 
9970 	unlock_extent(io_tree, 0, isize - 1, &cached_state);
9971 
9972 	if (ret)
9973 		btrfs_swap_deactivate(file);
9974 
9975 	btrfs_drew_write_unlock(&root->snapshot_lock);
9976 
9977 	btrfs_exclop_finish(fs_info);
9978 
9979 	if (ret)
9980 		return ret;
9981 
9982 	if (device)
9983 		sis->bdev = device->bdev;
9984 	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
9985 	sis->max = bsi.nr_pages;
9986 	sis->pages = bsi.nr_pages - 1;
9987 	sis->highest_bit = bsi.nr_pages - 1;
9988 	return bsi.nr_extents;
9989 }
9990 #else
9991 static void btrfs_swap_deactivate(struct file *file)
9992 {
9993 }
9994 
9995 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
9996 			       sector_t *span)
9997 {
9998 	return -EOPNOTSUPP;
9999 }
10000 #endif
10001 
10002 /*
10003  * Update the number of bytes used in the VFS' inode. When we replace extents in
10004  * a range (clone, dedupe, fallocate's zero range), we must update the number of
10005  * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
10006  * always get a correct value.
10007  */
10008 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
10009 			      const u64 add_bytes,
10010 			      const u64 del_bytes)
10011 {
10012 	if (add_bytes == del_bytes)
10013 		return;
10014 
10015 	spin_lock(&inode->lock);
10016 	if (del_bytes > 0)
10017 		inode_sub_bytes(&inode->vfs_inode, del_bytes);
10018 	if (add_bytes > 0)
10019 		inode_add_bytes(&inode->vfs_inode, add_bytes);
10020 	spin_unlock(&inode->lock);
10021 }
10022 
10023 /*
10024  * Verify that there are no ordered extents for a given file range.
10025  *
10026  * @inode:   The target inode.
10027  * @start:   Start offset of the file range, should be sector size aligned.
10028  * @end:     End offset (inclusive) of the file range, its value +1 should be
10029  *           sector size aligned.
10030  *
10031  * This should typically be used for cases where we locked an inode's VFS lock in
10032  * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
10033  * we have flushed all delalloc in the range, we have waited for all ordered
10034  * extents in the range to complete and finally we have locked the file range in
10035  * the inode's io_tree.
10036  */
10037 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
10038 {
10039 	struct btrfs_root *root = inode->root;
10040 	struct btrfs_ordered_extent *ordered;
10041 
10042 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
10043 		return;
10044 
10045 	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
10046 	if (ordered) {
10047 		btrfs_err(root->fs_info,
10048 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
10049 			  start, end, btrfs_ino(inode), btrfs_root_id(root),
10050 			  ordered->file_offset,
10051 			  ordered->file_offset + ordered->num_bytes - 1);
10052 		btrfs_put_ordered_extent(ordered);
10053 	}
10054 
10055 	ASSERT(ordered == NULL);
10056 }
10057 
10058 /*
10059  * Find the first inode with a minimum number.
10060  *
10061  * @root:	The root to search for.
10062  * @min_ino:	The minimum inode number.
10063  *
10064  * Find the first inode in the @root with a number >= @min_ino and return it.
10065  * Returns NULL if no such inode found.
10066  */
10067 struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
10068 {
10069 	struct btrfs_inode *inode;
10070 	unsigned long from = min_ino;
10071 
10072 	xa_lock(&root->inodes);
10073 	while (true) {
10074 		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
10075 		if (!inode)
10076 			break;
10077 		if (igrab(&inode->vfs_inode))
10078 			break;
10079 
10080 		from = btrfs_ino(inode) + 1;
10081 		cond_resched_lock(&root->inodes.xa_lock);
10082 	}
10083 	xa_unlock(&root->inodes);
10084 
10085 	return inode;
10086 }
10087 
10088 static const struct inode_operations btrfs_dir_inode_operations = {
10089 	.getattr	= btrfs_getattr,
10090 	.lookup		= btrfs_lookup,
10091 	.create		= btrfs_create,
10092 	.unlink		= btrfs_unlink,
10093 	.link		= btrfs_link,
10094 	.mkdir		= btrfs_mkdir,
10095 	.rmdir		= btrfs_rmdir,
10096 	.rename		= btrfs_rename2,
10097 	.symlink	= btrfs_symlink,
10098 	.setattr	= btrfs_setattr,
10099 	.mknod		= btrfs_mknod,
10100 	.listxattr	= btrfs_listxattr,
10101 	.permission	= btrfs_permission,
10102 	.get_inode_acl	= btrfs_get_acl,
10103 	.set_acl	= btrfs_set_acl,
10104 	.update_time	= btrfs_update_time,
10105 	.tmpfile        = btrfs_tmpfile,
10106 	.fileattr_get	= btrfs_fileattr_get,
10107 	.fileattr_set	= btrfs_fileattr_set,
10108 };
10109 
10110 static const struct file_operations btrfs_dir_file_operations = {
10111 	.llseek		= btrfs_dir_llseek,
10112 	.read		= generic_read_dir,
10113 	.iterate_shared	= btrfs_real_readdir,
10114 	.open		= btrfs_opendir,
10115 	.unlocked_ioctl	= btrfs_ioctl,
10116 #ifdef CONFIG_COMPAT
10117 	.compat_ioctl	= btrfs_compat_ioctl,
10118 #endif
10119 	.release        = btrfs_release_file,
10120 	.fsync		= btrfs_sync_file,
10121 };
10122 
10123 /*
10124  * btrfs doesn't support the bmap operation because swapfiles
10125  * use bmap to make a mapping of extents in the file.  They assume
10126  * these extents won't change over the life of the file and they
10127  * use the bmap result to do IO directly to the drive.
10128  *
10129  * the btrfs bmap call would return logical addresses that aren't
10130  * suitable for IO and they also will change frequently as COW
10131  * operations happen.  So, swapfile + btrfs == corruption.
10132  *
10133  * For now we're avoiding this by dropping bmap.
10134  */
10135 static const struct address_space_operations btrfs_aops = {
10136 	.read_folio	= btrfs_read_folio,
10137 	.writepages	= btrfs_writepages,
10138 	.readahead	= btrfs_readahead,
10139 	.invalidate_folio = btrfs_invalidate_folio,
10140 	.release_folio	= btrfs_release_folio,
10141 	.migrate_folio	= btrfs_migrate_folio,
10142 	.dirty_folio	= filemap_dirty_folio,
10143 	.error_remove_folio = generic_error_remove_folio,
10144 	.swap_activate	= btrfs_swap_activate,
10145 	.swap_deactivate = btrfs_swap_deactivate,
10146 };
10147 
10148 static const struct inode_operations btrfs_file_inode_operations = {
10149 	.getattr	= btrfs_getattr,
10150 	.setattr	= btrfs_setattr,
10151 	.listxattr      = btrfs_listxattr,
10152 	.permission	= btrfs_permission,
10153 	.fiemap		= btrfs_fiemap,
10154 	.get_inode_acl	= btrfs_get_acl,
10155 	.set_acl	= btrfs_set_acl,
10156 	.update_time	= btrfs_update_time,
10157 	.fileattr_get	= btrfs_fileattr_get,
10158 	.fileattr_set	= btrfs_fileattr_set,
10159 };
10160 static const struct inode_operations btrfs_special_inode_operations = {
10161 	.getattr	= btrfs_getattr,
10162 	.setattr	= btrfs_setattr,
10163 	.permission	= btrfs_permission,
10164 	.listxattr	= btrfs_listxattr,
10165 	.get_inode_acl	= btrfs_get_acl,
10166 	.set_acl	= btrfs_set_acl,
10167 	.update_time	= btrfs_update_time,
10168 };
10169 static const struct inode_operations btrfs_symlink_inode_operations = {
10170 	.get_link	= page_get_link,
10171 	.getattr	= btrfs_getattr,
10172 	.setattr	= btrfs_setattr,
10173 	.permission	= btrfs_permission,
10174 	.listxattr	= btrfs_listxattr,
10175 	.update_time	= btrfs_update_time,
10176 };
10177 
10178 const struct dentry_operations btrfs_dentry_operations = {
10179 	.d_delete	= btrfs_dentry_delete,
10180 };
10181