xref: /linux/fs/btrfs/inode.c (revision fdfd6dde4328635861db029f6fdb649e17350526)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <crypto/hash.h>
7 #include <linux/kernel.h>
8 #include <linux/bio.h>
9 #include <linux/blk-cgroup.h>
10 #include <linux/file.h>
11 #include <linux/fs.h>
12 #include <linux/pagemap.h>
13 #include <linux/highmem.h>
14 #include <linux/time.h>
15 #include <linux/init.h>
16 #include <linux/string.h>
17 #include <linux/backing-dev.h>
18 #include <linux/writeback.h>
19 #include <linux/compat.h>
20 #include <linux/xattr.h>
21 #include <linux/posix_acl.h>
22 #include <linux/falloc.h>
23 #include <linux/slab.h>
24 #include <linux/ratelimit.h>
25 #include <linux/btrfs.h>
26 #include <linux/blkdev.h>
27 #include <linux/posix_acl_xattr.h>
28 #include <linux/uio.h>
29 #include <linux/magic.h>
30 #include <linux/iversion.h>
31 #include <linux/swap.h>
32 #include <linux/migrate.h>
33 #include <linux/sched/mm.h>
34 #include <linux/iomap.h>
35 #include <asm/unaligned.h>
36 #include <linux/fsverity.h>
37 #include "misc.h"
38 #include "ctree.h"
39 #include "disk-io.h"
40 #include "transaction.h"
41 #include "btrfs_inode.h"
42 #include "print-tree.h"
43 #include "ordered-data.h"
44 #include "xattr.h"
45 #include "tree-log.h"
46 #include "bio.h"
47 #include "compression.h"
48 #include "locking.h"
49 #include "free-space-cache.h"
50 #include "props.h"
51 #include "qgroup.h"
52 #include "delalloc-space.h"
53 #include "block-group.h"
54 #include "space-info.h"
55 #include "zoned.h"
56 #include "subpage.h"
57 #include "inode-item.h"
58 #include "fs.h"
59 #include "accessors.h"
60 #include "extent-tree.h"
61 #include "root-tree.h"
62 #include "defrag.h"
63 #include "dir-item.h"
64 #include "file-item.h"
65 #include "uuid-tree.h"
66 #include "ioctl.h"
67 #include "file.h"
68 #include "acl.h"
69 #include "relocation.h"
70 #include "verity.h"
71 #include "super.h"
72 #include "orphan.h"
73 #include "backref.h"
74 #include "raid-stripe-tree.h"
75 
76 struct btrfs_iget_args {
77 	u64 ino;
78 	struct btrfs_root *root;
79 };
80 
81 struct btrfs_dio_data {
82 	ssize_t submitted;
83 	struct extent_changeset *data_reserved;
84 	struct btrfs_ordered_extent *ordered;
85 	bool data_space_reserved;
86 	bool nocow_done;
87 };
88 
89 struct btrfs_dio_private {
90 	/* Range of I/O */
91 	u64 file_offset;
92 	u32 bytes;
93 
94 	/* This must be last */
95 	struct btrfs_bio bbio;
96 };
97 
98 static struct bio_set btrfs_dio_bioset;
99 
100 struct btrfs_rename_ctx {
101 	/* Output field. Stores the index number of the old directory entry. */
102 	u64 index;
103 };
104 
105 /*
106  * Used by data_reloc_print_warning_inode() to pass needed info for filename
107  * resolution and output of error message.
108  */
109 struct data_reloc_warn {
110 	struct btrfs_path path;
111 	struct btrfs_fs_info *fs_info;
112 	u64 extent_item_size;
113 	u64 logical;
114 	int mirror_num;
115 };
116 
117 /*
118  * For the file_extent_tree, we want to hold the inode lock when we lookup and
119  * update the disk_i_size, but lockdep will complain because our io_tree we hold
120  * the tree lock and get the inode lock when setting delalloc. These two things
121  * are unrelated, so make a class for the file_extent_tree so we don't get the
122  * two locking patterns mixed up.
123  */
124 static struct lock_class_key file_extent_tree_class;
125 
126 static const struct inode_operations btrfs_dir_inode_operations;
127 static const struct inode_operations btrfs_symlink_inode_operations;
128 static const struct inode_operations btrfs_special_inode_operations;
129 static const struct inode_operations btrfs_file_inode_operations;
130 static const struct address_space_operations btrfs_aops;
131 static const struct file_operations btrfs_dir_file_operations;
132 
133 static struct kmem_cache *btrfs_inode_cachep;
134 
135 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
136 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
137 
138 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
139 				     struct page *locked_page, u64 start,
140 				     u64 end, struct writeback_control *wbc,
141 				     bool pages_dirty);
142 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
143 				       u64 len, u64 orig_start, u64 block_start,
144 				       u64 block_len, u64 orig_block_len,
145 				       u64 ram_bytes, int compress_type,
146 				       int type);
147 
148 static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
149 					  u64 root, void *warn_ctx)
150 {
151 	struct data_reloc_warn *warn = warn_ctx;
152 	struct btrfs_fs_info *fs_info = warn->fs_info;
153 	struct extent_buffer *eb;
154 	struct btrfs_inode_item *inode_item;
155 	struct inode_fs_paths *ipath = NULL;
156 	struct btrfs_root *local_root;
157 	struct btrfs_key key;
158 	unsigned int nofs_flag;
159 	u32 nlink;
160 	int ret;
161 
162 	local_root = btrfs_get_fs_root(fs_info, root, true);
163 	if (IS_ERR(local_root)) {
164 		ret = PTR_ERR(local_root);
165 		goto err;
166 	}
167 
168 	/* This makes the path point to (inum INODE_ITEM ioff). */
169 	key.objectid = inum;
170 	key.type = BTRFS_INODE_ITEM_KEY;
171 	key.offset = 0;
172 
173 	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
174 	if (ret) {
175 		btrfs_put_root(local_root);
176 		btrfs_release_path(&warn->path);
177 		goto err;
178 	}
179 
180 	eb = warn->path.nodes[0];
181 	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
182 	nlink = btrfs_inode_nlink(eb, inode_item);
183 	btrfs_release_path(&warn->path);
184 
185 	nofs_flag = memalloc_nofs_save();
186 	ipath = init_ipath(4096, local_root, &warn->path);
187 	memalloc_nofs_restore(nofs_flag);
188 	if (IS_ERR(ipath)) {
189 		btrfs_put_root(local_root);
190 		ret = PTR_ERR(ipath);
191 		ipath = NULL;
192 		/*
193 		 * -ENOMEM, not a critical error, just output an generic error
194 		 * without filename.
195 		 */
196 		btrfs_warn(fs_info,
197 "checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
198 			   warn->logical, warn->mirror_num, root, inum, offset);
199 		return ret;
200 	}
201 	ret = paths_from_inode(inum, ipath);
202 	if (ret < 0)
203 		goto err;
204 
205 	/*
206 	 * We deliberately ignore the bit ipath might have been too small to
207 	 * hold all of the paths here
208 	 */
209 	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
210 		btrfs_warn(fs_info,
211 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
212 			   warn->logical, warn->mirror_num, root, inum, offset,
213 			   fs_info->sectorsize, nlink,
214 			   (char *)(unsigned long)ipath->fspath->val[i]);
215 	}
216 
217 	btrfs_put_root(local_root);
218 	free_ipath(ipath);
219 	return 0;
220 
221 err:
222 	btrfs_warn(fs_info,
223 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
224 		   warn->logical, warn->mirror_num, root, inum, offset, ret);
225 
226 	free_ipath(ipath);
227 	return ret;
228 }
229 
230 /*
231  * Do extra user-friendly error output (e.g. lookup all the affected files).
232  *
233  * Return true if we succeeded doing the backref lookup.
234  * Return false if such lookup failed, and has to fallback to the old error message.
235  */
236 static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
237 				   const u8 *csum, const u8 *csum_expected,
238 				   int mirror_num)
239 {
240 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
241 	struct btrfs_path path = { 0 };
242 	struct btrfs_key found_key = { 0 };
243 	struct extent_buffer *eb;
244 	struct btrfs_extent_item *ei;
245 	const u32 csum_size = fs_info->csum_size;
246 	u64 logical;
247 	u64 flags;
248 	u32 item_size;
249 	int ret;
250 
251 	mutex_lock(&fs_info->reloc_mutex);
252 	logical = btrfs_get_reloc_bg_bytenr(fs_info);
253 	mutex_unlock(&fs_info->reloc_mutex);
254 
255 	if (logical == U64_MAX) {
256 		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
257 		btrfs_warn_rl(fs_info,
258 "csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
259 			inode->root->root_key.objectid, btrfs_ino(inode), file_off,
260 			CSUM_FMT_VALUE(csum_size, csum),
261 			CSUM_FMT_VALUE(csum_size, csum_expected),
262 			mirror_num);
263 		return;
264 	}
265 
266 	logical += file_off;
267 	btrfs_warn_rl(fs_info,
268 "csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
269 			inode->root->root_key.objectid,
270 			btrfs_ino(inode), file_off, logical,
271 			CSUM_FMT_VALUE(csum_size, csum),
272 			CSUM_FMT_VALUE(csum_size, csum_expected),
273 			mirror_num);
274 
275 	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
276 	if (ret < 0) {
277 		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
278 			     logical, ret);
279 		return;
280 	}
281 	eb = path.nodes[0];
282 	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
283 	item_size = btrfs_item_size(eb, path.slots[0]);
284 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
285 		unsigned long ptr = 0;
286 		u64 ref_root;
287 		u8 ref_level;
288 
289 		while (true) {
290 			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
291 						      item_size, &ref_root,
292 						      &ref_level);
293 			if (ret < 0) {
294 				btrfs_warn_rl(fs_info,
295 				"failed to resolve tree backref for logical %llu: %d",
296 					      logical, ret);
297 				break;
298 			}
299 			if (ret > 0)
300 				break;
301 
302 			btrfs_warn_rl(fs_info,
303 "csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
304 				logical, mirror_num,
305 				(ref_level ? "node" : "leaf"),
306 				ref_level, ref_root);
307 		}
308 		btrfs_release_path(&path);
309 	} else {
310 		struct btrfs_backref_walk_ctx ctx = { 0 };
311 		struct data_reloc_warn reloc_warn = { 0 };
312 
313 		btrfs_release_path(&path);
314 
315 		ctx.bytenr = found_key.objectid;
316 		ctx.extent_item_pos = logical - found_key.objectid;
317 		ctx.fs_info = fs_info;
318 
319 		reloc_warn.logical = logical;
320 		reloc_warn.extent_item_size = found_key.offset;
321 		reloc_warn.mirror_num = mirror_num;
322 		reloc_warn.fs_info = fs_info;
323 
324 		iterate_extent_inodes(&ctx, true,
325 				      data_reloc_print_warning_inode, &reloc_warn);
326 	}
327 }
328 
329 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
330 		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
331 {
332 	struct btrfs_root *root = inode->root;
333 	const u32 csum_size = root->fs_info->csum_size;
334 
335 	/* For data reloc tree, it's better to do a backref lookup instead. */
336 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
337 		return print_data_reloc_error(inode, logical_start, csum,
338 					      csum_expected, mirror_num);
339 
340 	/* Output without objectid, which is more meaningful */
341 	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
342 		btrfs_warn_rl(root->fs_info,
343 "csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
344 			root->root_key.objectid, btrfs_ino(inode),
345 			logical_start,
346 			CSUM_FMT_VALUE(csum_size, csum),
347 			CSUM_FMT_VALUE(csum_size, csum_expected),
348 			mirror_num);
349 	} else {
350 		btrfs_warn_rl(root->fs_info,
351 "csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
352 			root->root_key.objectid, btrfs_ino(inode),
353 			logical_start,
354 			CSUM_FMT_VALUE(csum_size, csum),
355 			CSUM_FMT_VALUE(csum_size, csum_expected),
356 			mirror_num);
357 	}
358 }
359 
360 /*
361  * Lock inode i_rwsem based on arguments passed.
362  *
363  * ilock_flags can have the following bit set:
364  *
365  * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
366  * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
367  *		     return -EAGAIN
368  * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
369  */
370 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
371 {
372 	if (ilock_flags & BTRFS_ILOCK_SHARED) {
373 		if (ilock_flags & BTRFS_ILOCK_TRY) {
374 			if (!inode_trylock_shared(&inode->vfs_inode))
375 				return -EAGAIN;
376 			else
377 				return 0;
378 		}
379 		inode_lock_shared(&inode->vfs_inode);
380 	} else {
381 		if (ilock_flags & BTRFS_ILOCK_TRY) {
382 			if (!inode_trylock(&inode->vfs_inode))
383 				return -EAGAIN;
384 			else
385 				return 0;
386 		}
387 		inode_lock(&inode->vfs_inode);
388 	}
389 	if (ilock_flags & BTRFS_ILOCK_MMAP)
390 		down_write(&inode->i_mmap_lock);
391 	return 0;
392 }
393 
394 /*
395  * Unock inode i_rwsem.
396  *
397  * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
398  * to decide whether the lock acquired is shared or exclusive.
399  */
400 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
401 {
402 	if (ilock_flags & BTRFS_ILOCK_MMAP)
403 		up_write(&inode->i_mmap_lock);
404 	if (ilock_flags & BTRFS_ILOCK_SHARED)
405 		inode_unlock_shared(&inode->vfs_inode);
406 	else
407 		inode_unlock(&inode->vfs_inode);
408 }
409 
410 /*
411  * Cleanup all submitted ordered extents in specified range to handle errors
412  * from the btrfs_run_delalloc_range() callback.
413  *
414  * NOTE: caller must ensure that when an error happens, it can not call
415  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
416  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
417  * to be released, which we want to happen only when finishing the ordered
418  * extent (btrfs_finish_ordered_io()).
419  */
420 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
421 						 struct page *locked_page,
422 						 u64 offset, u64 bytes)
423 {
424 	unsigned long index = offset >> PAGE_SHIFT;
425 	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
426 	u64 page_start = 0, page_end = 0;
427 	struct page *page;
428 
429 	if (locked_page) {
430 		page_start = page_offset(locked_page);
431 		page_end = page_start + PAGE_SIZE - 1;
432 	}
433 
434 	while (index <= end_index) {
435 		/*
436 		 * For locked page, we will call btrfs_mark_ordered_io_finished
437 		 * through btrfs_mark_ordered_io_finished() on it
438 		 * in run_delalloc_range() for the error handling, which will
439 		 * clear page Ordered and run the ordered extent accounting.
440 		 *
441 		 * Here we can't just clear the Ordered bit, or
442 		 * btrfs_mark_ordered_io_finished() would skip the accounting
443 		 * for the page range, and the ordered extent will never finish.
444 		 */
445 		if (locked_page && index == (page_start >> PAGE_SHIFT)) {
446 			index++;
447 			continue;
448 		}
449 		page = find_get_page(inode->vfs_inode.i_mapping, index);
450 		index++;
451 		if (!page)
452 			continue;
453 
454 		/*
455 		 * Here we just clear all Ordered bits for every page in the
456 		 * range, then btrfs_mark_ordered_io_finished() will handle
457 		 * the ordered extent accounting for the range.
458 		 */
459 		btrfs_folio_clamp_clear_ordered(inode->root->fs_info,
460 						page_folio(page), offset, bytes);
461 		put_page(page);
462 	}
463 
464 	if (locked_page) {
465 		/* The locked page covers the full range, nothing needs to be done */
466 		if (bytes + offset <= page_start + PAGE_SIZE)
467 			return;
468 		/*
469 		 * In case this page belongs to the delalloc range being
470 		 * instantiated then skip it, since the first page of a range is
471 		 * going to be properly cleaned up by the caller of
472 		 * run_delalloc_range
473 		 */
474 		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
475 			bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
476 			offset = page_offset(locked_page) + PAGE_SIZE;
477 		}
478 	}
479 
480 	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
481 }
482 
483 static int btrfs_dirty_inode(struct btrfs_inode *inode);
484 
485 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
486 				     struct btrfs_new_inode_args *args)
487 {
488 	int err;
489 
490 	if (args->default_acl) {
491 		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
492 				      ACL_TYPE_DEFAULT);
493 		if (err)
494 			return err;
495 	}
496 	if (args->acl) {
497 		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
498 		if (err)
499 			return err;
500 	}
501 	if (!args->default_acl && !args->acl)
502 		cache_no_acl(args->inode);
503 	return btrfs_xattr_security_init(trans, args->inode, args->dir,
504 					 &args->dentry->d_name);
505 }
506 
507 /*
508  * this does all the hard work for inserting an inline extent into
509  * the btree.  The caller should have done a btrfs_drop_extents so that
510  * no overlapping inline items exist in the btree
511  */
512 static int insert_inline_extent(struct btrfs_trans_handle *trans,
513 				struct btrfs_path *path,
514 				struct btrfs_inode *inode, bool extent_inserted,
515 				size_t size, size_t compressed_size,
516 				int compress_type,
517 				struct page **compressed_pages,
518 				bool update_i_size)
519 {
520 	struct btrfs_root *root = inode->root;
521 	struct extent_buffer *leaf;
522 	struct page *page = NULL;
523 	char *kaddr;
524 	unsigned long ptr;
525 	struct btrfs_file_extent_item *ei;
526 	int ret;
527 	size_t cur_size = size;
528 	u64 i_size;
529 
530 	ASSERT((compressed_size > 0 && compressed_pages) ||
531 	       (compressed_size == 0 && !compressed_pages));
532 
533 	if (compressed_size && compressed_pages)
534 		cur_size = compressed_size;
535 
536 	if (!extent_inserted) {
537 		struct btrfs_key key;
538 		size_t datasize;
539 
540 		key.objectid = btrfs_ino(inode);
541 		key.offset = 0;
542 		key.type = BTRFS_EXTENT_DATA_KEY;
543 
544 		datasize = btrfs_file_extent_calc_inline_size(cur_size);
545 		ret = btrfs_insert_empty_item(trans, root, path, &key,
546 					      datasize);
547 		if (ret)
548 			goto fail;
549 	}
550 	leaf = path->nodes[0];
551 	ei = btrfs_item_ptr(leaf, path->slots[0],
552 			    struct btrfs_file_extent_item);
553 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
554 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
555 	btrfs_set_file_extent_encryption(leaf, ei, 0);
556 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
557 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
558 	ptr = btrfs_file_extent_inline_start(ei);
559 
560 	if (compress_type != BTRFS_COMPRESS_NONE) {
561 		struct page *cpage;
562 		int i = 0;
563 		while (compressed_size > 0) {
564 			cpage = compressed_pages[i];
565 			cur_size = min_t(unsigned long, compressed_size,
566 				       PAGE_SIZE);
567 
568 			kaddr = kmap_local_page(cpage);
569 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
570 			kunmap_local(kaddr);
571 
572 			i++;
573 			ptr += cur_size;
574 			compressed_size -= cur_size;
575 		}
576 		btrfs_set_file_extent_compression(leaf, ei,
577 						  compress_type);
578 	} else {
579 		page = find_get_page(inode->vfs_inode.i_mapping, 0);
580 		btrfs_set_file_extent_compression(leaf, ei, 0);
581 		kaddr = kmap_local_page(page);
582 		write_extent_buffer(leaf, kaddr, ptr, size);
583 		kunmap_local(kaddr);
584 		put_page(page);
585 	}
586 	btrfs_mark_buffer_dirty(trans, leaf);
587 	btrfs_release_path(path);
588 
589 	/*
590 	 * We align size to sectorsize for inline extents just for simplicity
591 	 * sake.
592 	 */
593 	ret = btrfs_inode_set_file_extent_range(inode, 0,
594 					ALIGN(size, root->fs_info->sectorsize));
595 	if (ret)
596 		goto fail;
597 
598 	/*
599 	 * We're an inline extent, so nobody can extend the file past i_size
600 	 * without locking a page we already have locked.
601 	 *
602 	 * We must do any i_size and inode updates before we unlock the pages.
603 	 * Otherwise we could end up racing with unlink.
604 	 */
605 	i_size = i_size_read(&inode->vfs_inode);
606 	if (update_i_size && size > i_size) {
607 		i_size_write(&inode->vfs_inode, size);
608 		i_size = size;
609 	}
610 	inode->disk_i_size = i_size;
611 
612 fail:
613 	return ret;
614 }
615 
616 
617 /*
618  * conditionally insert an inline extent into the file.  This
619  * does the checks required to make sure the data is small enough
620  * to fit as an inline extent.
621  */
622 static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
623 					  size_t compressed_size,
624 					  int compress_type,
625 					  struct page **compressed_pages,
626 					  bool update_i_size)
627 {
628 	struct btrfs_drop_extents_args drop_args = { 0 };
629 	struct btrfs_root *root = inode->root;
630 	struct btrfs_fs_info *fs_info = root->fs_info;
631 	struct btrfs_trans_handle *trans;
632 	u64 data_len = (compressed_size ?: size);
633 	int ret;
634 	struct btrfs_path *path;
635 
636 	/*
637 	 * We can create an inline extent if it ends at or beyond the current
638 	 * i_size, is no larger than a sector (decompressed), and the (possibly
639 	 * compressed) data fits in a leaf and the configured maximum inline
640 	 * size.
641 	 */
642 	if (size < i_size_read(&inode->vfs_inode) ||
643 	    size > fs_info->sectorsize ||
644 	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
645 	    data_len > fs_info->max_inline)
646 		return 1;
647 
648 	path = btrfs_alloc_path();
649 	if (!path)
650 		return -ENOMEM;
651 
652 	trans = btrfs_join_transaction(root);
653 	if (IS_ERR(trans)) {
654 		btrfs_free_path(path);
655 		return PTR_ERR(trans);
656 	}
657 	trans->block_rsv = &inode->block_rsv;
658 
659 	drop_args.path = path;
660 	drop_args.start = 0;
661 	drop_args.end = fs_info->sectorsize;
662 	drop_args.drop_cache = true;
663 	drop_args.replace_extent = true;
664 	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
665 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
666 	if (ret) {
667 		btrfs_abort_transaction(trans, ret);
668 		goto out;
669 	}
670 
671 	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
672 				   size, compressed_size, compress_type,
673 				   compressed_pages, update_i_size);
674 	if (ret && ret != -ENOSPC) {
675 		btrfs_abort_transaction(trans, ret);
676 		goto out;
677 	} else if (ret == -ENOSPC) {
678 		ret = 1;
679 		goto out;
680 	}
681 
682 	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
683 	ret = btrfs_update_inode(trans, inode);
684 	if (ret && ret != -ENOSPC) {
685 		btrfs_abort_transaction(trans, ret);
686 		goto out;
687 	} else if (ret == -ENOSPC) {
688 		ret = 1;
689 		goto out;
690 	}
691 
692 	btrfs_set_inode_full_sync(inode);
693 out:
694 	/*
695 	 * Don't forget to free the reserved space, as for inlined extent
696 	 * it won't count as data extent, free them directly here.
697 	 * And at reserve time, it's always aligned to page size, so
698 	 * just free one page here.
699 	 */
700 	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
701 	btrfs_free_path(path);
702 	btrfs_end_transaction(trans);
703 	return ret;
704 }
705 
706 struct async_extent {
707 	u64 start;
708 	u64 ram_size;
709 	u64 compressed_size;
710 	struct page **pages;
711 	unsigned long nr_pages;
712 	int compress_type;
713 	struct list_head list;
714 };
715 
716 struct async_chunk {
717 	struct btrfs_inode *inode;
718 	struct page *locked_page;
719 	u64 start;
720 	u64 end;
721 	blk_opf_t write_flags;
722 	struct list_head extents;
723 	struct cgroup_subsys_state *blkcg_css;
724 	struct btrfs_work work;
725 	struct async_cow *async_cow;
726 };
727 
728 struct async_cow {
729 	atomic_t num_chunks;
730 	struct async_chunk chunks[];
731 };
732 
733 static noinline int add_async_extent(struct async_chunk *cow,
734 				     u64 start, u64 ram_size,
735 				     u64 compressed_size,
736 				     struct page **pages,
737 				     unsigned long nr_pages,
738 				     int compress_type)
739 {
740 	struct async_extent *async_extent;
741 
742 	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
743 	BUG_ON(!async_extent); /* -ENOMEM */
744 	async_extent->start = start;
745 	async_extent->ram_size = ram_size;
746 	async_extent->compressed_size = compressed_size;
747 	async_extent->pages = pages;
748 	async_extent->nr_pages = nr_pages;
749 	async_extent->compress_type = compress_type;
750 	list_add_tail(&async_extent->list, &cow->extents);
751 	return 0;
752 }
753 
754 /*
755  * Check if the inode needs to be submitted to compression, based on mount
756  * options, defragmentation, properties or heuristics.
757  */
758 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
759 				      u64 end)
760 {
761 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
762 
763 	if (!btrfs_inode_can_compress(inode)) {
764 		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
765 			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
766 			btrfs_ino(inode));
767 		return 0;
768 	}
769 	/*
770 	 * Special check for subpage.
771 	 *
772 	 * We lock the full page then run each delalloc range in the page, thus
773 	 * for the following case, we will hit some subpage specific corner case:
774 	 *
775 	 * 0		32K		64K
776 	 * |	|///////|	|///////|
777 	 *		\- A		\- B
778 	 *
779 	 * In above case, both range A and range B will try to unlock the full
780 	 * page [0, 64K), causing the one finished later will have page
781 	 * unlocked already, triggering various page lock requirement BUG_ON()s.
782 	 *
783 	 * So here we add an artificial limit that subpage compression can only
784 	 * if the range is fully page aligned.
785 	 *
786 	 * In theory we only need to ensure the first page is fully covered, but
787 	 * the tailing partial page will be locked until the full compression
788 	 * finishes, delaying the write of other range.
789 	 *
790 	 * TODO: Make btrfs_run_delalloc_range() to lock all delalloc range
791 	 * first to prevent any submitted async extent to unlock the full page.
792 	 * By this, we can ensure for subpage case that only the last async_cow
793 	 * will unlock the full page.
794 	 */
795 	if (fs_info->sectorsize < PAGE_SIZE) {
796 		if (!PAGE_ALIGNED(start) ||
797 		    !PAGE_ALIGNED(end + 1))
798 			return 0;
799 	}
800 
801 	/* force compress */
802 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
803 		return 1;
804 	/* defrag ioctl */
805 	if (inode->defrag_compress)
806 		return 1;
807 	/* bad compression ratios */
808 	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
809 		return 0;
810 	if (btrfs_test_opt(fs_info, COMPRESS) ||
811 	    inode->flags & BTRFS_INODE_COMPRESS ||
812 	    inode->prop_compress)
813 		return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
814 	return 0;
815 }
816 
817 static inline void inode_should_defrag(struct btrfs_inode *inode,
818 		u64 start, u64 end, u64 num_bytes, u32 small_write)
819 {
820 	/* If this is a small write inside eof, kick off a defrag */
821 	if (num_bytes < small_write &&
822 	    (start > 0 || end + 1 < inode->disk_i_size))
823 		btrfs_add_inode_defrag(NULL, inode, small_write);
824 }
825 
826 /*
827  * Work queue call back to started compression on a file and pages.
828  *
829  * This is done inside an ordered work queue, and the compression is spread
830  * across many cpus.  The actual IO submission is step two, and the ordered work
831  * queue takes care of making sure that happens in the same order things were
832  * put onto the queue by writepages and friends.
833  *
834  * If this code finds it can't get good compression, it puts an entry onto the
835  * work queue to write the uncompressed bytes.  This makes sure that both
836  * compressed inodes and uncompressed inodes are written in the same order that
837  * the flusher thread sent them down.
838  */
839 static void compress_file_range(struct btrfs_work *work)
840 {
841 	struct async_chunk *async_chunk =
842 		container_of(work, struct async_chunk, work);
843 	struct btrfs_inode *inode = async_chunk->inode;
844 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
845 	struct address_space *mapping = inode->vfs_inode.i_mapping;
846 	u64 blocksize = fs_info->sectorsize;
847 	u64 start = async_chunk->start;
848 	u64 end = async_chunk->end;
849 	u64 actual_end;
850 	u64 i_size;
851 	int ret = 0;
852 	struct page **pages;
853 	unsigned long nr_pages;
854 	unsigned long total_compressed = 0;
855 	unsigned long total_in = 0;
856 	unsigned int poff;
857 	int i;
858 	int compress_type = fs_info->compress_type;
859 
860 	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
861 
862 	/*
863 	 * We need to call clear_page_dirty_for_io on each page in the range.
864 	 * Otherwise applications with the file mmap'd can wander in and change
865 	 * the page contents while we are compressing them.
866 	 */
867 	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
868 
869 	/*
870 	 * We need to save i_size before now because it could change in between
871 	 * us evaluating the size and assigning it.  This is because we lock and
872 	 * unlock the page in truncate and fallocate, and then modify the i_size
873 	 * later on.
874 	 *
875 	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
876 	 * does that for us.
877 	 */
878 	barrier();
879 	i_size = i_size_read(&inode->vfs_inode);
880 	barrier();
881 	actual_end = min_t(u64, i_size, end + 1);
882 again:
883 	pages = NULL;
884 	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
885 	nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);
886 
887 	/*
888 	 * we don't want to send crud past the end of i_size through
889 	 * compression, that's just a waste of CPU time.  So, if the
890 	 * end of the file is before the start of our current
891 	 * requested range of bytes, we bail out to the uncompressed
892 	 * cleanup code that can deal with all of this.
893 	 *
894 	 * It isn't really the fastest way to fix things, but this is a
895 	 * very uncommon corner.
896 	 */
897 	if (actual_end <= start)
898 		goto cleanup_and_bail_uncompressed;
899 
900 	total_compressed = actual_end - start;
901 
902 	/*
903 	 * Skip compression for a small file range(<=blocksize) that
904 	 * isn't an inline extent, since it doesn't save disk space at all.
905 	 */
906 	if (total_compressed <= blocksize &&
907 	   (start > 0 || end + 1 < inode->disk_i_size))
908 		goto cleanup_and_bail_uncompressed;
909 
910 	/*
911 	 * For subpage case, we require full page alignment for the sector
912 	 * aligned range.
913 	 * Thus we must also check against @actual_end, not just @end.
914 	 */
915 	if (blocksize < PAGE_SIZE) {
916 		if (!PAGE_ALIGNED(start) ||
917 		    !PAGE_ALIGNED(round_up(actual_end, blocksize)))
918 			goto cleanup_and_bail_uncompressed;
919 	}
920 
921 	total_compressed = min_t(unsigned long, total_compressed,
922 			BTRFS_MAX_UNCOMPRESSED);
923 	total_in = 0;
924 	ret = 0;
925 
926 	/*
927 	 * We do compression for mount -o compress and when the inode has not
928 	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
929 	 * discover bad compression ratios.
930 	 */
931 	if (!inode_need_compress(inode, start, end))
932 		goto cleanup_and_bail_uncompressed;
933 
934 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
935 	if (!pages) {
936 		/*
937 		 * Memory allocation failure is not a fatal error, we can fall
938 		 * back to uncompressed code.
939 		 */
940 		goto cleanup_and_bail_uncompressed;
941 	}
942 
943 	if (inode->defrag_compress)
944 		compress_type = inode->defrag_compress;
945 	else if (inode->prop_compress)
946 		compress_type = inode->prop_compress;
947 
948 	/* Compression level is applied here. */
949 	ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
950 				   mapping, start, pages, &nr_pages, &total_in,
951 				   &total_compressed);
952 	if (ret)
953 		goto mark_incompressible;
954 
955 	/*
956 	 * Zero the tail end of the last page, as we might be sending it down
957 	 * to disk.
958 	 */
959 	poff = offset_in_page(total_compressed);
960 	if (poff)
961 		memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);
962 
963 	/*
964 	 * Try to create an inline extent.
965 	 *
966 	 * If we didn't compress the entire range, try to create an uncompressed
967 	 * inline extent, else a compressed one.
968 	 *
969 	 * Check cow_file_range() for why we don't even try to create inline
970 	 * extent for the subpage case.
971 	 */
972 	if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
973 		if (total_in < actual_end) {
974 			ret = cow_file_range_inline(inode, actual_end, 0,
975 						    BTRFS_COMPRESS_NONE, NULL,
976 						    false);
977 		} else {
978 			ret = cow_file_range_inline(inode, actual_end,
979 						    total_compressed,
980 						    compress_type, pages,
981 						    false);
982 		}
983 		if (ret <= 0) {
984 			unsigned long clear_flags = EXTENT_DELALLOC |
985 				EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
986 				EXTENT_DO_ACCOUNTING;
987 
988 			if (ret < 0)
989 				mapping_set_error(mapping, -EIO);
990 
991 			/*
992 			 * inline extent creation worked or returned error,
993 			 * we don't need to create any more async work items.
994 			 * Unlock and free up our temp pages.
995 			 *
996 			 * We use DO_ACCOUNTING here because we need the
997 			 * delalloc_release_metadata to be done _after_ we drop
998 			 * our outstanding extent for clearing delalloc for this
999 			 * range.
1000 			 */
1001 			extent_clear_unlock_delalloc(inode, start, end,
1002 						     NULL,
1003 						     clear_flags,
1004 						     PAGE_UNLOCK |
1005 						     PAGE_START_WRITEBACK |
1006 						     PAGE_END_WRITEBACK);
1007 			goto free_pages;
1008 		}
1009 	}
1010 
1011 	/*
1012 	 * We aren't doing an inline extent. Round the compressed size up to a
1013 	 * block size boundary so the allocator does sane things.
1014 	 */
1015 	total_compressed = ALIGN(total_compressed, blocksize);
1016 
1017 	/*
1018 	 * One last check to make sure the compression is really a win, compare
1019 	 * the page count read with the blocks on disk, compression must free at
1020 	 * least one sector.
1021 	 */
1022 	total_in = round_up(total_in, fs_info->sectorsize);
1023 	if (total_compressed + blocksize > total_in)
1024 		goto mark_incompressible;
1025 
1026 	/*
1027 	 * The async work queues will take care of doing actual allocation on
1028 	 * disk for these compressed pages, and will submit the bios.
1029 	 */
1030 	add_async_extent(async_chunk, start, total_in, total_compressed, pages,
1031 			 nr_pages, compress_type);
1032 	if (start + total_in < end) {
1033 		start += total_in;
1034 		cond_resched();
1035 		goto again;
1036 	}
1037 	return;
1038 
1039 mark_incompressible:
1040 	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
1041 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
1042 cleanup_and_bail_uncompressed:
1043 	add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
1044 			 BTRFS_COMPRESS_NONE);
1045 free_pages:
1046 	if (pages) {
1047 		for (i = 0; i < nr_pages; i++) {
1048 			WARN_ON(pages[i]->mapping);
1049 			btrfs_free_compr_page(pages[i]);
1050 		}
1051 		kfree(pages);
1052 	}
1053 }
1054 
1055 static void free_async_extent_pages(struct async_extent *async_extent)
1056 {
1057 	int i;
1058 
1059 	if (!async_extent->pages)
1060 		return;
1061 
1062 	for (i = 0; i < async_extent->nr_pages; i++) {
1063 		WARN_ON(async_extent->pages[i]->mapping);
1064 		btrfs_free_compr_page(async_extent->pages[i]);
1065 	}
1066 	kfree(async_extent->pages);
1067 	async_extent->nr_pages = 0;
1068 	async_extent->pages = NULL;
1069 }
1070 
1071 static void submit_uncompressed_range(struct btrfs_inode *inode,
1072 				      struct async_extent *async_extent,
1073 				      struct page *locked_page)
1074 {
1075 	u64 start = async_extent->start;
1076 	u64 end = async_extent->start + async_extent->ram_size - 1;
1077 	int ret;
1078 	struct writeback_control wbc = {
1079 		.sync_mode		= WB_SYNC_ALL,
1080 		.range_start		= start,
1081 		.range_end		= end,
1082 		.no_cgroup_owner	= 1,
1083 	};
1084 
1085 	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
1086 	ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
1087 	wbc_detach_inode(&wbc);
1088 	if (ret < 0) {
1089 		btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
1090 		if (locked_page) {
1091 			const u64 page_start = page_offset(locked_page);
1092 
1093 			set_page_writeback(locked_page);
1094 			end_page_writeback(locked_page);
1095 			btrfs_mark_ordered_io_finished(inode, locked_page,
1096 						       page_start, PAGE_SIZE,
1097 						       !ret);
1098 			mapping_set_error(locked_page->mapping, ret);
1099 			unlock_page(locked_page);
1100 		}
1101 	}
1102 }
1103 
1104 static void submit_one_async_extent(struct async_chunk *async_chunk,
1105 				    struct async_extent *async_extent,
1106 				    u64 *alloc_hint)
1107 {
1108 	struct btrfs_inode *inode = async_chunk->inode;
1109 	struct extent_io_tree *io_tree = &inode->io_tree;
1110 	struct btrfs_root *root = inode->root;
1111 	struct btrfs_fs_info *fs_info = root->fs_info;
1112 	struct btrfs_ordered_extent *ordered;
1113 	struct btrfs_key ins;
1114 	struct page *locked_page = NULL;
1115 	struct extent_map *em;
1116 	int ret = 0;
1117 	u64 start = async_extent->start;
1118 	u64 end = async_extent->start + async_extent->ram_size - 1;
1119 
1120 	if (async_chunk->blkcg_css)
1121 		kthread_associate_blkcg(async_chunk->blkcg_css);
1122 
1123 	/*
1124 	 * If async_chunk->locked_page is in the async_extent range, we need to
1125 	 * handle it.
1126 	 */
1127 	if (async_chunk->locked_page) {
1128 		u64 locked_page_start = page_offset(async_chunk->locked_page);
1129 		u64 locked_page_end = locked_page_start + PAGE_SIZE - 1;
1130 
1131 		if (!(start >= locked_page_end || end <= locked_page_start))
1132 			locked_page = async_chunk->locked_page;
1133 	}
1134 	lock_extent(io_tree, start, end, NULL);
1135 
1136 	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
1137 		submit_uncompressed_range(inode, async_extent, locked_page);
1138 		goto done;
1139 	}
1140 
1141 	ret = btrfs_reserve_extent(root, async_extent->ram_size,
1142 				   async_extent->compressed_size,
1143 				   async_extent->compressed_size,
1144 				   0, *alloc_hint, &ins, 1, 1);
1145 	if (ret) {
1146 		/*
1147 		 * Here we used to try again by going back to non-compressed
1148 		 * path for ENOSPC.  But we can't reserve space even for
1149 		 * compressed size, how could it work for uncompressed size
1150 		 * which requires larger size?  So here we directly go error
1151 		 * path.
1152 		 */
1153 		goto out_free;
1154 	}
1155 
1156 	/* Here we're doing allocation and writeback of the compressed pages */
1157 	em = create_io_em(inode, start,
1158 			  async_extent->ram_size,	/* len */
1159 			  start,			/* orig_start */
1160 			  ins.objectid,			/* block_start */
1161 			  ins.offset,			/* block_len */
1162 			  ins.offset,			/* orig_block_len */
1163 			  async_extent->ram_size,	/* ram_bytes */
1164 			  async_extent->compress_type,
1165 			  BTRFS_ORDERED_COMPRESSED);
1166 	if (IS_ERR(em)) {
1167 		ret = PTR_ERR(em);
1168 		goto out_free_reserve;
1169 	}
1170 	free_extent_map(em);
1171 
1172 	ordered = btrfs_alloc_ordered_extent(inode, start,	/* file_offset */
1173 				       async_extent->ram_size,	/* num_bytes */
1174 				       async_extent->ram_size,	/* ram_bytes */
1175 				       ins.objectid,		/* disk_bytenr */
1176 				       ins.offset,		/* disk_num_bytes */
1177 				       0,			/* offset */
1178 				       1 << BTRFS_ORDERED_COMPRESSED,
1179 				       async_extent->compress_type);
1180 	if (IS_ERR(ordered)) {
1181 		btrfs_drop_extent_map_range(inode, start, end, false);
1182 		ret = PTR_ERR(ordered);
1183 		goto out_free_reserve;
1184 	}
1185 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1186 
1187 	/* Clear dirty, set writeback and unlock the pages. */
1188 	extent_clear_unlock_delalloc(inode, start, end,
1189 			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
1190 			PAGE_UNLOCK | PAGE_START_WRITEBACK);
1191 	btrfs_submit_compressed_write(ordered,
1192 			    async_extent->pages,	/* compressed_pages */
1193 			    async_extent->nr_pages,
1194 			    async_chunk->write_flags, true);
1195 	*alloc_hint = ins.objectid + ins.offset;
1196 done:
1197 	if (async_chunk->blkcg_css)
1198 		kthread_associate_blkcg(NULL);
1199 	kfree(async_extent);
1200 	return;
1201 
1202 out_free_reserve:
1203 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1204 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1205 out_free:
1206 	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1207 	extent_clear_unlock_delalloc(inode, start, end,
1208 				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
1209 				     EXTENT_DELALLOC_NEW |
1210 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
1211 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1212 				     PAGE_END_WRITEBACK);
1213 	free_async_extent_pages(async_extent);
1214 	if (async_chunk->blkcg_css)
1215 		kthread_associate_blkcg(NULL);
1216 	btrfs_debug(fs_info,
1217 "async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
1218 		    root->root_key.objectid, btrfs_ino(inode), start,
1219 		    async_extent->ram_size, ret);
1220 	kfree(async_extent);
1221 }
1222 
1223 static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
1224 				      u64 num_bytes)
1225 {
1226 	struct extent_map_tree *em_tree = &inode->extent_tree;
1227 	struct extent_map *em;
1228 	u64 alloc_hint = 0;
1229 
1230 	read_lock(&em_tree->lock);
1231 	em = search_extent_mapping(em_tree, start, num_bytes);
1232 	if (em) {
1233 		/*
1234 		 * if block start isn't an actual block number then find the
1235 		 * first block in this inode and use that as a hint.  If that
1236 		 * block is also bogus then just don't worry about it.
1237 		 */
1238 		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1239 			free_extent_map(em);
1240 			em = search_extent_mapping(em_tree, 0, 0);
1241 			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
1242 				alloc_hint = em->block_start;
1243 			if (em)
1244 				free_extent_map(em);
1245 		} else {
1246 			alloc_hint = em->block_start;
1247 			free_extent_map(em);
1248 		}
1249 	}
1250 	read_unlock(&em_tree->lock);
1251 
1252 	return alloc_hint;
1253 }
1254 
1255 /*
1256  * when extent_io.c finds a delayed allocation range in the file,
1257  * the call backs end up in this code.  The basic idea is to
1258  * allocate extents on disk for the range, and create ordered data structs
1259  * in ram to track those extents.
1260  *
1261  * locked_page is the page that writepage had locked already.  We use
1262  * it to make sure we don't do extra locks or unlocks.
1263  *
1264  * When this function fails, it unlocks all pages except @locked_page.
1265  *
1266  * When this function successfully creates an inline extent, it returns 1 and
1267  * unlocks all pages including locked_page and starts I/O on them.
1268  * (In reality inline extents are limited to a single page, so locked_page is
1269  * the only page handled anyway).
1270  *
1271  * When this function succeed and creates a normal extent, the page locking
1272  * status depends on the passed in flags:
1273  *
1274  * - If @keep_locked is set, all pages are kept locked.
1275  * - Else all pages except for @locked_page are unlocked.
1276  *
1277  * When a failure happens in the second or later iteration of the
1278  * while-loop, the ordered extents created in previous iterations are kept
1279  * intact. So, the caller must clean them up by calling
1280  * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for
1281  * example.
1282  */
1283 static noinline int cow_file_range(struct btrfs_inode *inode,
1284 				   struct page *locked_page, u64 start, u64 end,
1285 				   u64 *done_offset,
1286 				   bool keep_locked, bool no_inline)
1287 {
1288 	struct btrfs_root *root = inode->root;
1289 	struct btrfs_fs_info *fs_info = root->fs_info;
1290 	u64 alloc_hint = 0;
1291 	u64 orig_start = start;
1292 	u64 num_bytes;
1293 	unsigned long ram_size;
1294 	u64 cur_alloc_size = 0;
1295 	u64 min_alloc_size;
1296 	u64 blocksize = fs_info->sectorsize;
1297 	struct btrfs_key ins;
1298 	struct extent_map *em;
1299 	unsigned clear_bits;
1300 	unsigned long page_ops;
1301 	bool extent_reserved = false;
1302 	int ret = 0;
1303 
1304 	if (btrfs_is_free_space_inode(inode)) {
1305 		ret = -EINVAL;
1306 		goto out_unlock;
1307 	}
1308 
1309 	num_bytes = ALIGN(end - start + 1, blocksize);
1310 	num_bytes = max(blocksize,  num_bytes);
1311 	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1312 
1313 	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1314 
1315 	/*
1316 	 * Due to the page size limit, for subpage we can only trigger the
1317 	 * writeback for the dirty sectors of page, that means data writeback
1318 	 * is doing more writeback than what we want.
1319 	 *
1320 	 * This is especially unexpected for some call sites like fallocate,
1321 	 * where we only increase i_size after everything is done.
1322 	 * This means we can trigger inline extent even if we didn't want to.
1323 	 * So here we skip inline extent creation completely.
1324 	 */
1325 	if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
1326 		u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
1327 				       end + 1);
1328 
1329 		/* lets try to make an inline extent */
1330 		ret = cow_file_range_inline(inode, actual_end, 0,
1331 					    BTRFS_COMPRESS_NONE, NULL, false);
1332 		if (ret == 0) {
1333 			/*
1334 			 * We use DO_ACCOUNTING here because we need the
1335 			 * delalloc_release_metadata to be run _after_ we drop
1336 			 * our outstanding extent for clearing delalloc for this
1337 			 * range.
1338 			 */
1339 			extent_clear_unlock_delalloc(inode, start, end,
1340 				     locked_page,
1341 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1342 				     EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
1343 				     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1344 				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
1345 			/*
1346 			 * locked_page is locked by the caller of
1347 			 * writepage_delalloc(), not locked by
1348 			 * __process_pages_contig().
1349 			 *
1350 			 * We can't let __process_pages_contig() to unlock it,
1351 			 * as it doesn't have any subpage::writers recorded.
1352 			 *
1353 			 * Here we manually unlock the page, since the caller
1354 			 * can't determine if it's an inline extent or a
1355 			 * compressed extent.
1356 			 */
1357 			unlock_page(locked_page);
1358 			ret = 1;
1359 			goto done;
1360 		} else if (ret < 0) {
1361 			goto out_unlock;
1362 		}
1363 	}
1364 
1365 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
1366 
1367 	/*
1368 	 * Relocation relies on the relocated extents to have exactly the same
1369 	 * size as the original extents. Normally writeback for relocation data
1370 	 * extents follows a NOCOW path because relocation preallocates the
1371 	 * extents. However, due to an operation such as scrub turning a block
1372 	 * group to RO mode, it may fallback to COW mode, so we must make sure
1373 	 * an extent allocated during COW has exactly the requested size and can
1374 	 * not be split into smaller extents, otherwise relocation breaks and
1375 	 * fails during the stage where it updates the bytenr of file extent
1376 	 * items.
1377 	 */
1378 	if (btrfs_is_data_reloc_root(root))
1379 		min_alloc_size = num_bytes;
1380 	else
1381 		min_alloc_size = fs_info->sectorsize;
1382 
1383 	while (num_bytes > 0) {
1384 		struct btrfs_ordered_extent *ordered;
1385 
1386 		cur_alloc_size = num_bytes;
1387 		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
1388 					   min_alloc_size, 0, alloc_hint,
1389 					   &ins, 1, 1);
1390 		if (ret == -EAGAIN) {
1391 			/*
1392 			 * btrfs_reserve_extent only returns -EAGAIN for zoned
1393 			 * file systems, which is an indication that there are
1394 			 * no active zones to allocate from at the moment.
1395 			 *
1396 			 * If this is the first loop iteration, wait for at
1397 			 * least one zone to finish before retrying the
1398 			 * allocation.  Otherwise ask the caller to write out
1399 			 * the already allocated blocks before coming back to
1400 			 * us, or return -ENOSPC if it can't handle retries.
1401 			 */
1402 			ASSERT(btrfs_is_zoned(fs_info));
1403 			if (start == orig_start) {
1404 				wait_on_bit_io(&inode->root->fs_info->flags,
1405 					       BTRFS_FS_NEED_ZONE_FINISH,
1406 					       TASK_UNINTERRUPTIBLE);
1407 				continue;
1408 			}
1409 			if (done_offset) {
1410 				*done_offset = start - 1;
1411 				return 0;
1412 			}
1413 			ret = -ENOSPC;
1414 		}
1415 		if (ret < 0)
1416 			goto out_unlock;
1417 		cur_alloc_size = ins.offset;
1418 		extent_reserved = true;
1419 
1420 		ram_size = ins.offset;
1421 		em = create_io_em(inode, start, ins.offset, /* len */
1422 				  start, /* orig_start */
1423 				  ins.objectid, /* block_start */
1424 				  ins.offset, /* block_len */
1425 				  ins.offset, /* orig_block_len */
1426 				  ram_size, /* ram_bytes */
1427 				  BTRFS_COMPRESS_NONE, /* compress_type */
1428 				  BTRFS_ORDERED_REGULAR /* type */);
1429 		if (IS_ERR(em)) {
1430 			ret = PTR_ERR(em);
1431 			goto out_reserve;
1432 		}
1433 		free_extent_map(em);
1434 
1435 		ordered = btrfs_alloc_ordered_extent(inode, start, ram_size,
1436 					ram_size, ins.objectid, cur_alloc_size,
1437 					0, 1 << BTRFS_ORDERED_REGULAR,
1438 					BTRFS_COMPRESS_NONE);
1439 		if (IS_ERR(ordered)) {
1440 			ret = PTR_ERR(ordered);
1441 			goto out_drop_extent_cache;
1442 		}
1443 
1444 		if (btrfs_is_data_reloc_root(root)) {
1445 			ret = btrfs_reloc_clone_csums(ordered);
1446 
1447 			/*
1448 			 * Only drop cache here, and process as normal.
1449 			 *
1450 			 * We must not allow extent_clear_unlock_delalloc()
1451 			 * at out_unlock label to free meta of this ordered
1452 			 * extent, as its meta should be freed by
1453 			 * btrfs_finish_ordered_io().
1454 			 *
1455 			 * So we must continue until @start is increased to
1456 			 * skip current ordered extent.
1457 			 */
1458 			if (ret)
1459 				btrfs_drop_extent_map_range(inode, start,
1460 							    start + ram_size - 1,
1461 							    false);
1462 		}
1463 		btrfs_put_ordered_extent(ordered);
1464 
1465 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1466 
1467 		/*
1468 		 * We're not doing compressed IO, don't unlock the first page
1469 		 * (which the caller expects to stay locked), don't clear any
1470 		 * dirty bits and don't set any writeback bits
1471 		 *
1472 		 * Do set the Ordered (Private2) bit so we know this page was
1473 		 * properly setup for writepage.
1474 		 */
1475 		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
1476 		page_ops |= PAGE_SET_ORDERED;
1477 
1478 		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
1479 					     locked_page,
1480 					     EXTENT_LOCKED | EXTENT_DELALLOC,
1481 					     page_ops);
1482 		if (num_bytes < cur_alloc_size)
1483 			num_bytes = 0;
1484 		else
1485 			num_bytes -= cur_alloc_size;
1486 		alloc_hint = ins.objectid + ins.offset;
1487 		start += cur_alloc_size;
1488 		extent_reserved = false;
1489 
1490 		/*
1491 		 * btrfs_reloc_clone_csums() error, since start is increased
1492 		 * extent_clear_unlock_delalloc() at out_unlock label won't
1493 		 * free metadata of current ordered extent, we're OK to exit.
1494 		 */
1495 		if (ret)
1496 			goto out_unlock;
1497 	}
1498 done:
1499 	if (done_offset)
1500 		*done_offset = end;
1501 	return ret;
1502 
1503 out_drop_extent_cache:
1504 	btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false);
1505 out_reserve:
1506 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1507 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
1508 out_unlock:
1509 	/*
1510 	 * Now, we have three regions to clean up:
1511 	 *
1512 	 * |-------(1)----|---(2)---|-------------(3)----------|
1513 	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
1514 	 *
1515 	 * We process each region below.
1516 	 */
1517 
1518 	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1519 		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1520 	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1521 
1522 	/*
1523 	 * For the range (1). We have already instantiated the ordered extents
1524 	 * for this region. They are cleaned up by
1525 	 * btrfs_cleanup_ordered_extents() in e.g,
1526 	 * btrfs_run_delalloc_range(). EXTENT_LOCKED | EXTENT_DELALLOC are
1527 	 * already cleared in the above loop. And, EXTENT_DELALLOC_NEW |
1528 	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
1529 	 * function.
1530 	 *
1531 	 * However, in case of @keep_locked, we still need to unlock the pages
1532 	 * (except @locked_page) to ensure all the pages are unlocked.
1533 	 */
1534 	if (keep_locked && orig_start < start) {
1535 		if (!locked_page)
1536 			mapping_set_error(inode->vfs_inode.i_mapping, ret);
1537 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
1538 					     locked_page, 0, page_ops);
1539 	}
1540 
1541 	/*
1542 	 * For the range (2). If we reserved an extent for our delalloc range
1543 	 * (or a subrange) and failed to create the respective ordered extent,
1544 	 * then it means that when we reserved the extent we decremented the
1545 	 * extent's size from the data space_info's bytes_may_use counter and
1546 	 * incremented the space_info's bytes_reserved counter by the same
1547 	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
1548 	 * to decrement again the data space_info's bytes_may_use counter,
1549 	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
1550 	 */
1551 	if (extent_reserved) {
1552 		extent_clear_unlock_delalloc(inode, start,
1553 					     start + cur_alloc_size - 1,
1554 					     locked_page,
1555 					     clear_bits,
1556 					     page_ops);
1557 		start += cur_alloc_size;
1558 	}
1559 
1560 	/*
1561 	 * For the range (3). We never touched the region. In addition to the
1562 	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
1563 	 * space_info's bytes_may_use counter, reserved in
1564 	 * btrfs_check_data_free_space().
1565 	 */
1566 	if (start < end) {
1567 		clear_bits |= EXTENT_CLEAR_DATA_RESV;
1568 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
1569 					     clear_bits, page_ops);
1570 	}
1571 	return ret;
1572 }
1573 
1574 /*
1575  * Phase two of compressed writeback.  This is the ordered portion of the code,
1576  * which only gets called in the order the work was queued.  We walk all the
1577  * async extents created by compress_file_range and send them down to the disk.
1578  *
1579  * If called with @do_free == true then it'll try to finish the work and free
1580  * the work struct eventually.
1581  */
1582 static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
1583 {
1584 	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1585 						     work);
1586 	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1587 	struct async_extent *async_extent;
1588 	unsigned long nr_pages;
1589 	u64 alloc_hint = 0;
1590 
1591 	if (do_free) {
1592 		struct async_chunk *async_chunk;
1593 		struct async_cow *async_cow;
1594 
1595 		async_chunk = container_of(work, struct async_chunk, work);
1596 		btrfs_add_delayed_iput(async_chunk->inode);
1597 		if (async_chunk->blkcg_css)
1598 			css_put(async_chunk->blkcg_css);
1599 
1600 		async_cow = async_chunk->async_cow;
1601 		if (atomic_dec_and_test(&async_cow->num_chunks))
1602 			kvfree(async_cow);
1603 		return;
1604 	}
1605 
1606 	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1607 		PAGE_SHIFT;
1608 
1609 	while (!list_empty(&async_chunk->extents)) {
1610 		async_extent = list_entry(async_chunk->extents.next,
1611 					  struct async_extent, list);
1612 		list_del(&async_extent->list);
1613 		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
1614 	}
1615 
1616 	/* atomic_sub_return implies a barrier */
1617 	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1618 	    5 * SZ_1M)
1619 		cond_wake_up_nomb(&fs_info->async_submit_wait);
1620 }
1621 
1622 static bool run_delalloc_compressed(struct btrfs_inode *inode,
1623 				    struct page *locked_page, u64 start,
1624 				    u64 end, struct writeback_control *wbc)
1625 {
1626 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1627 	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1628 	struct async_cow *ctx;
1629 	struct async_chunk *async_chunk;
1630 	unsigned long nr_pages;
1631 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1632 	int i;
1633 	unsigned nofs_flag;
1634 	const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1635 
1636 	nofs_flag = memalloc_nofs_save();
1637 	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1638 	memalloc_nofs_restore(nofs_flag);
1639 	if (!ctx)
1640 		return false;
1641 
1642 	unlock_extent(&inode->io_tree, start, end, NULL);
1643 	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
1644 
1645 	async_chunk = ctx->chunks;
1646 	atomic_set(&ctx->num_chunks, num_chunks);
1647 
1648 	for (i = 0; i < num_chunks; i++) {
1649 		u64 cur_end = min(end, start + SZ_512K - 1);
1650 
1651 		/*
1652 		 * igrab is called higher up in the call chain, take only the
1653 		 * lightweight reference for the callback lifetime
1654 		 */
1655 		ihold(&inode->vfs_inode);
1656 		async_chunk[i].async_cow = ctx;
1657 		async_chunk[i].inode = inode;
1658 		async_chunk[i].start = start;
1659 		async_chunk[i].end = cur_end;
1660 		async_chunk[i].write_flags = write_flags;
1661 		INIT_LIST_HEAD(&async_chunk[i].extents);
1662 
1663 		/*
1664 		 * The locked_page comes all the way from writepage and its
1665 		 * the original page we were actually given.  As we spread
1666 		 * this large delalloc region across multiple async_chunk
1667 		 * structs, only the first struct needs a pointer to locked_page
1668 		 *
1669 		 * This way we don't need racey decisions about who is supposed
1670 		 * to unlock it.
1671 		 */
1672 		if (locked_page) {
1673 			/*
1674 			 * Depending on the compressibility, the pages might or
1675 			 * might not go through async.  We want all of them to
1676 			 * be accounted against wbc once.  Let's do it here
1677 			 * before the paths diverge.  wbc accounting is used
1678 			 * only for foreign writeback detection and doesn't
1679 			 * need full accuracy.  Just account the whole thing
1680 			 * against the first page.
1681 			 */
1682 			wbc_account_cgroup_owner(wbc, locked_page,
1683 						 cur_end - start);
1684 			async_chunk[i].locked_page = locked_page;
1685 			locked_page = NULL;
1686 		} else {
1687 			async_chunk[i].locked_page = NULL;
1688 		}
1689 
1690 		if (blkcg_css != blkcg_root_css) {
1691 			css_get(blkcg_css);
1692 			async_chunk[i].blkcg_css = blkcg_css;
1693 			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
1694 		} else {
1695 			async_chunk[i].blkcg_css = NULL;
1696 		}
1697 
1698 		btrfs_init_work(&async_chunk[i].work, compress_file_range,
1699 				submit_compressed_extents);
1700 
1701 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1702 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1703 
1704 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1705 
1706 		start = cur_end + 1;
1707 	}
1708 	return true;
1709 }
1710 
1711 /*
1712  * Run the delalloc range from start to end, and write back any dirty pages
1713  * covered by the range.
1714  */
1715 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
1716 				     struct page *locked_page, u64 start,
1717 				     u64 end, struct writeback_control *wbc,
1718 				     bool pages_dirty)
1719 {
1720 	u64 done_offset = end;
1721 	int ret;
1722 
1723 	while (start <= end) {
1724 		ret = cow_file_range(inode, locked_page, start, end, &done_offset,
1725 				     true, false);
1726 		if (ret)
1727 			return ret;
1728 		extent_write_locked_range(&inode->vfs_inode, locked_page, start,
1729 					  done_offset, wbc, pages_dirty);
1730 		start = done_offset + 1;
1731 	}
1732 
1733 	return 1;
1734 }
1735 
1736 static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
1737 					u64 bytenr, u64 num_bytes, bool nowait)
1738 {
1739 	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
1740 	struct btrfs_ordered_sum *sums;
1741 	int ret;
1742 	LIST_HEAD(list);
1743 
1744 	ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
1745 				      &list, 0, nowait);
1746 	if (ret == 0 && list_empty(&list))
1747 		return 0;
1748 
1749 	while (!list_empty(&list)) {
1750 		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1751 		list_del(&sums->list);
1752 		kfree(sums);
1753 	}
1754 	if (ret < 0)
1755 		return ret;
1756 	return 1;
1757 }
1758 
1759 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1760 			   const u64 start, const u64 end)
1761 {
1762 	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1763 	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1764 	const u64 range_bytes = end + 1 - start;
1765 	struct extent_io_tree *io_tree = &inode->io_tree;
1766 	u64 range_start = start;
1767 	u64 count;
1768 	int ret;
1769 
1770 	/*
1771 	 * If EXTENT_NORESERVE is set it means that when the buffered write was
1772 	 * made we had not enough available data space and therefore we did not
1773 	 * reserve data space for it, since we though we could do NOCOW for the
1774 	 * respective file range (either there is prealloc extent or the inode
1775 	 * has the NOCOW bit set).
1776 	 *
1777 	 * However when we need to fallback to COW mode (because for example the
1778 	 * block group for the corresponding extent was turned to RO mode by a
1779 	 * scrub or relocation) we need to do the following:
1780 	 *
1781 	 * 1) We increment the bytes_may_use counter of the data space info.
1782 	 *    If COW succeeds, it allocates a new data extent and after doing
1783 	 *    that it decrements the space info's bytes_may_use counter and
1784 	 *    increments its bytes_reserved counter by the same amount (we do
1785 	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1786 	 *    bytes_may_use counter to compensate (when space is reserved at
1787 	 *    buffered write time, the bytes_may_use counter is incremented);
1788 	 *
1789 	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1790 	 *    that if the COW path fails for any reason, it decrements (through
1791 	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1792 	 *    data space info, which we incremented in the step above.
1793 	 *
1794 	 * If we need to fallback to cow and the inode corresponds to a free
1795 	 * space cache inode or an inode of the data relocation tree, we must
1796 	 * also increment bytes_may_use of the data space_info for the same
1797 	 * reason. Space caches and relocated data extents always get a prealloc
1798 	 * extent for them, however scrub or balance may have set the block
1799 	 * group that contains that extent to RO mode and therefore force COW
1800 	 * when starting writeback.
1801 	 */
1802 	count = count_range_bits(io_tree, &range_start, end, range_bytes,
1803 				 EXTENT_NORESERVE, 0, NULL);
1804 	if (count > 0 || is_space_ino || is_reloc_ino) {
1805 		u64 bytes = count;
1806 		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1807 		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1808 
1809 		if (is_space_ino || is_reloc_ino)
1810 			bytes = range_bytes;
1811 
1812 		spin_lock(&sinfo->lock);
1813 		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
1814 		spin_unlock(&sinfo->lock);
1815 
1816 		if (count > 0)
1817 			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1818 					 NULL);
1819 	}
1820 
1821 	/*
1822 	 * Don't try to create inline extents, as a mix of inline extent that
1823 	 * is written out and unlocked directly and a normal NOCOW extent
1824 	 * doesn't work.
1825 	 */
1826 	ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
1827 	ASSERT(ret != 1);
1828 	return ret;
1829 }
1830 
1831 struct can_nocow_file_extent_args {
1832 	/* Input fields. */
1833 
1834 	/* Start file offset of the range we want to NOCOW. */
1835 	u64 start;
1836 	/* End file offset (inclusive) of the range we want to NOCOW. */
1837 	u64 end;
1838 	bool writeback_path;
1839 	bool strict;
1840 	/*
1841 	 * Free the path passed to can_nocow_file_extent() once it's not needed
1842 	 * anymore.
1843 	 */
1844 	bool free_path;
1845 
1846 	/* Output fields. Only set when can_nocow_file_extent() returns 1. */
1847 
1848 	u64 disk_bytenr;
1849 	u64 disk_num_bytes;
1850 	u64 extent_offset;
1851 	/* Number of bytes that can be written to in NOCOW mode. */
1852 	u64 num_bytes;
1853 };
1854 
1855 /*
1856  * Check if we can NOCOW the file extent that the path points to.
1857  * This function may return with the path released, so the caller should check
1858  * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1859  *
1860  * Returns: < 0 on error
1861  *            0 if we can not NOCOW
1862  *            1 if we can NOCOW
1863  */
1864 static int can_nocow_file_extent(struct btrfs_path *path,
1865 				 struct btrfs_key *key,
1866 				 struct btrfs_inode *inode,
1867 				 struct can_nocow_file_extent_args *args)
1868 {
1869 	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1870 	struct extent_buffer *leaf = path->nodes[0];
1871 	struct btrfs_root *root = inode->root;
1872 	struct btrfs_file_extent_item *fi;
1873 	u64 extent_end;
1874 	u8 extent_type;
1875 	int can_nocow = 0;
1876 	int ret = 0;
1877 	bool nowait = path->nowait;
1878 
1879 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1880 	extent_type = btrfs_file_extent_type(leaf, fi);
1881 
1882 	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1883 		goto out;
1884 
1885 	/* Can't access these fields unless we know it's not an inline extent. */
1886 	args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1887 	args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1888 	args->extent_offset = btrfs_file_extent_offset(leaf, fi);
1889 
1890 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1891 	    extent_type == BTRFS_FILE_EXTENT_REG)
1892 		goto out;
1893 
1894 	/*
1895 	 * If the extent was created before the generation where the last snapshot
1896 	 * for its subvolume was created, then this implies the extent is shared,
1897 	 * hence we must COW.
1898 	 */
1899 	if (!args->strict &&
1900 	    btrfs_file_extent_generation(leaf, fi) <=
1901 	    btrfs_root_last_snapshot(&root->root_item))
1902 		goto out;
1903 
1904 	/* An explicit hole, must COW. */
1905 	if (args->disk_bytenr == 0)
1906 		goto out;
1907 
1908 	/* Compressed/encrypted/encoded extents must be COWed. */
1909 	if (btrfs_file_extent_compression(leaf, fi) ||
1910 	    btrfs_file_extent_encryption(leaf, fi) ||
1911 	    btrfs_file_extent_other_encoding(leaf, fi))
1912 		goto out;
1913 
1914 	extent_end = btrfs_file_extent_end(path);
1915 
1916 	/*
1917 	 * The following checks can be expensive, as they need to take other
1918 	 * locks and do btree or rbtree searches, so release the path to avoid
1919 	 * blocking other tasks for too long.
1920 	 */
1921 	btrfs_release_path(path);
1922 
1923 	ret = btrfs_cross_ref_exist(root, btrfs_ino(inode),
1924 				    key->offset - args->extent_offset,
1925 				    args->disk_bytenr, args->strict, path);
1926 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1927 	if (ret != 0)
1928 		goto out;
1929 
1930 	if (args->free_path) {
1931 		/*
1932 		 * We don't need the path anymore, plus through the
1933 		 * csum_exist_in_range() call below we will end up allocating
1934 		 * another path. So free the path to avoid unnecessary extra
1935 		 * memory usage.
1936 		 */
1937 		btrfs_free_path(path);
1938 		path = NULL;
1939 	}
1940 
1941 	/* If there are pending snapshots for this root, we must COW. */
1942 	if (args->writeback_path && !is_freespace_inode &&
1943 	    atomic_read(&root->snapshot_force_cow))
1944 		goto out;
1945 
1946 	args->disk_bytenr += args->extent_offset;
1947 	args->disk_bytenr += args->start - key->offset;
1948 	args->num_bytes = min(args->end + 1, extent_end) - args->start;
1949 
1950 	/*
1951 	 * Force COW if csums exist in the range. This ensures that csums for a
1952 	 * given extent are either valid or do not exist.
1953 	 */
1954 	ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
1955 				  nowait);
1956 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1957 	if (ret != 0)
1958 		goto out;
1959 
1960 	can_nocow = 1;
1961  out:
1962 	if (args->free_path && path)
1963 		btrfs_free_path(path);
1964 
1965 	return ret < 0 ? ret : can_nocow;
1966 }
1967 
1968 /*
1969  * when nowcow writeback call back.  This checks for snapshots or COW copies
1970  * of the extents that exist in the file, and COWs the file as required.
1971  *
1972  * If no cow copies or snapshots exist, we write directly to the existing
1973  * blocks on disk
1974  */
1975 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1976 				       struct page *locked_page,
1977 				       const u64 start, const u64 end)
1978 {
1979 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1980 	struct btrfs_root *root = inode->root;
1981 	struct btrfs_path *path;
1982 	u64 cow_start = (u64)-1;
1983 	u64 cur_offset = start;
1984 	int ret;
1985 	bool check_prev = true;
1986 	u64 ino = btrfs_ino(inode);
1987 	struct can_nocow_file_extent_args nocow_args = { 0 };
1988 
1989 	/*
1990 	 * Normally on a zoned device we're only doing COW writes, but in case
1991 	 * of relocation on a zoned filesystem serializes I/O so that we're only
1992 	 * writing sequentially and can end up here as well.
1993 	 */
1994 	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
1995 
1996 	path = btrfs_alloc_path();
1997 	if (!path) {
1998 		ret = -ENOMEM;
1999 		goto error;
2000 	}
2001 
2002 	nocow_args.end = end;
2003 	nocow_args.writeback_path = true;
2004 
2005 	while (1) {
2006 		struct btrfs_block_group *nocow_bg = NULL;
2007 		struct btrfs_ordered_extent *ordered;
2008 		struct btrfs_key found_key;
2009 		struct btrfs_file_extent_item *fi;
2010 		struct extent_buffer *leaf;
2011 		u64 extent_end;
2012 		u64 ram_bytes;
2013 		u64 nocow_end;
2014 		int extent_type;
2015 		bool is_prealloc;
2016 
2017 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2018 					       cur_offset, 0);
2019 		if (ret < 0)
2020 			goto error;
2021 
2022 		/*
2023 		 * If there is no extent for our range when doing the initial
2024 		 * search, then go back to the previous slot as it will be the
2025 		 * one containing the search offset
2026 		 */
2027 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
2028 			leaf = path->nodes[0];
2029 			btrfs_item_key_to_cpu(leaf, &found_key,
2030 					      path->slots[0] - 1);
2031 			if (found_key.objectid == ino &&
2032 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
2033 				path->slots[0]--;
2034 		}
2035 		check_prev = false;
2036 next_slot:
2037 		/* Go to next leaf if we have exhausted the current one */
2038 		leaf = path->nodes[0];
2039 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2040 			ret = btrfs_next_leaf(root, path);
2041 			if (ret < 0)
2042 				goto error;
2043 			if (ret > 0)
2044 				break;
2045 			leaf = path->nodes[0];
2046 		}
2047 
2048 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2049 
2050 		/* Didn't find anything for our INO */
2051 		if (found_key.objectid > ino)
2052 			break;
2053 		/*
2054 		 * Keep searching until we find an EXTENT_ITEM or there are no
2055 		 * more extents for this inode
2056 		 */
2057 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
2058 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
2059 			path->slots[0]++;
2060 			goto next_slot;
2061 		}
2062 
2063 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
2064 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2065 		    found_key.offset > end)
2066 			break;
2067 
2068 		/*
2069 		 * If the found extent starts after requested offset, then
2070 		 * adjust extent_end to be right before this extent begins
2071 		 */
2072 		if (found_key.offset > cur_offset) {
2073 			extent_end = found_key.offset;
2074 			extent_type = 0;
2075 			goto must_cow;
2076 		}
2077 
2078 		/*
2079 		 * Found extent which begins before our range and potentially
2080 		 * intersect it
2081 		 */
2082 		fi = btrfs_item_ptr(leaf, path->slots[0],
2083 				    struct btrfs_file_extent_item);
2084 		extent_type = btrfs_file_extent_type(leaf, fi);
2085 		/* If this is triggered then we have a memory corruption. */
2086 		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2087 		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2088 			ret = -EUCLEAN;
2089 			goto error;
2090 		}
2091 		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
2092 		extent_end = btrfs_file_extent_end(path);
2093 
2094 		/*
2095 		 * If the extent we got ends before our current offset, skip to
2096 		 * the next extent.
2097 		 */
2098 		if (extent_end <= cur_offset) {
2099 			path->slots[0]++;
2100 			goto next_slot;
2101 		}
2102 
2103 		nocow_args.start = cur_offset;
2104 		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2105 		if (ret < 0)
2106 			goto error;
2107 		if (ret == 0)
2108 			goto must_cow;
2109 
2110 		ret = 0;
2111 		nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
2112 		if (!nocow_bg) {
2113 must_cow:
2114 			/*
2115 			 * If we can't perform NOCOW writeback for the range,
2116 			 * then record the beginning of the range that needs to
2117 			 * be COWed.  It will be written out before the next
2118 			 * NOCOW range if we find one, or when exiting this
2119 			 * loop.
2120 			 */
2121 			if (cow_start == (u64)-1)
2122 				cow_start = cur_offset;
2123 			cur_offset = extent_end;
2124 			if (cur_offset > end)
2125 				break;
2126 			if (!path->nodes[0])
2127 				continue;
2128 			path->slots[0]++;
2129 			goto next_slot;
2130 		}
2131 
2132 		/*
2133 		 * COW range from cow_start to found_key.offset - 1. As the key
2134 		 * will contain the beginning of the first extent that can be
2135 		 * NOCOW, following one which needs to be COW'ed
2136 		 */
2137 		if (cow_start != (u64)-1) {
2138 			ret = fallback_to_cow(inode, locked_page,
2139 					      cow_start, found_key.offset - 1);
2140 			cow_start = (u64)-1;
2141 			if (ret) {
2142 				btrfs_dec_nocow_writers(nocow_bg);
2143 				goto error;
2144 			}
2145 		}
2146 
2147 		nocow_end = cur_offset + nocow_args.num_bytes - 1;
2148 		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
2149 		if (is_prealloc) {
2150 			u64 orig_start = found_key.offset - nocow_args.extent_offset;
2151 			struct extent_map *em;
2152 
2153 			em = create_io_em(inode, cur_offset, nocow_args.num_bytes,
2154 					  orig_start,
2155 					  nocow_args.disk_bytenr, /* block_start */
2156 					  nocow_args.num_bytes, /* block_len */
2157 					  nocow_args.disk_num_bytes, /* orig_block_len */
2158 					  ram_bytes, BTRFS_COMPRESS_NONE,
2159 					  BTRFS_ORDERED_PREALLOC);
2160 			if (IS_ERR(em)) {
2161 				btrfs_dec_nocow_writers(nocow_bg);
2162 				ret = PTR_ERR(em);
2163 				goto error;
2164 			}
2165 			free_extent_map(em);
2166 		}
2167 
2168 		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
2169 				nocow_args.num_bytes, nocow_args.num_bytes,
2170 				nocow_args.disk_bytenr, nocow_args.num_bytes, 0,
2171 				is_prealloc
2172 				? (1 << BTRFS_ORDERED_PREALLOC)
2173 				: (1 << BTRFS_ORDERED_NOCOW),
2174 				BTRFS_COMPRESS_NONE);
2175 		btrfs_dec_nocow_writers(nocow_bg);
2176 		if (IS_ERR(ordered)) {
2177 			if (is_prealloc) {
2178 				btrfs_drop_extent_map_range(inode, cur_offset,
2179 							    nocow_end, false);
2180 			}
2181 			ret = PTR_ERR(ordered);
2182 			goto error;
2183 		}
2184 
2185 		if (btrfs_is_data_reloc_root(root))
2186 			/*
2187 			 * Error handled later, as we must prevent
2188 			 * extent_clear_unlock_delalloc() in error handler
2189 			 * from freeing metadata of created ordered extent.
2190 			 */
2191 			ret = btrfs_reloc_clone_csums(ordered);
2192 		btrfs_put_ordered_extent(ordered);
2193 
2194 		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2195 					     locked_page, EXTENT_LOCKED |
2196 					     EXTENT_DELALLOC |
2197 					     EXTENT_CLEAR_DATA_RESV,
2198 					     PAGE_UNLOCK | PAGE_SET_ORDERED);
2199 
2200 		cur_offset = extent_end;
2201 
2202 		/*
2203 		 * btrfs_reloc_clone_csums() error, now we're OK to call error
2204 		 * handler, as metadata for created ordered extent will only
2205 		 * be freed by btrfs_finish_ordered_io().
2206 		 */
2207 		if (ret)
2208 			goto error;
2209 		if (cur_offset > end)
2210 			break;
2211 	}
2212 	btrfs_release_path(path);
2213 
2214 	if (cur_offset <= end && cow_start == (u64)-1)
2215 		cow_start = cur_offset;
2216 
2217 	if (cow_start != (u64)-1) {
2218 		cur_offset = end;
2219 		ret = fallback_to_cow(inode, locked_page, cow_start, end);
2220 		cow_start = (u64)-1;
2221 		if (ret)
2222 			goto error;
2223 	}
2224 
2225 	btrfs_free_path(path);
2226 	return 0;
2227 
2228 error:
2229 	/*
2230 	 * If an error happened while a COW region is outstanding, cur_offset
2231 	 * needs to be reset to cow_start to ensure the COW region is unlocked
2232 	 * as well.
2233 	 */
2234 	if (cow_start != (u64)-1)
2235 		cur_offset = cow_start;
2236 	if (cur_offset < end)
2237 		extent_clear_unlock_delalloc(inode, cur_offset, end,
2238 					     locked_page, EXTENT_LOCKED |
2239 					     EXTENT_DELALLOC | EXTENT_DEFRAG |
2240 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2241 					     PAGE_START_WRITEBACK |
2242 					     PAGE_END_WRITEBACK);
2243 	btrfs_free_path(path);
2244 	return ret;
2245 }
2246 
2247 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2248 {
2249 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2250 		if (inode->defrag_bytes &&
2251 		    test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
2252 			return false;
2253 		return true;
2254 	}
2255 	return false;
2256 }
2257 
2258 /*
2259  * Function to process delayed allocation (create CoW) for ranges which are
2260  * being touched for the first time.
2261  */
2262 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2263 			     u64 start, u64 end, struct writeback_control *wbc)
2264 {
2265 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2266 	int ret;
2267 
2268 	/*
2269 	 * The range must cover part of the @locked_page, or a return of 1
2270 	 * can confuse the caller.
2271 	 */
2272 	ASSERT(!(end <= page_offset(locked_page) ||
2273 		 start >= page_offset(locked_page) + PAGE_SIZE));
2274 
2275 	if (should_nocow(inode, start, end)) {
2276 		ret = run_delalloc_nocow(inode, locked_page, start, end);
2277 		goto out;
2278 	}
2279 
2280 	if (btrfs_inode_can_compress(inode) &&
2281 	    inode_need_compress(inode, start, end) &&
2282 	    run_delalloc_compressed(inode, locked_page, start, end, wbc))
2283 		return 1;
2284 
2285 	if (zoned)
2286 		ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
2287 				       true);
2288 	else
2289 		ret = cow_file_range(inode, locked_page, start, end, NULL,
2290 				     false, false);
2291 
2292 out:
2293 	if (ret < 0)
2294 		btrfs_cleanup_ordered_extents(inode, locked_page, start,
2295 					      end - start + 1);
2296 	return ret;
2297 }
2298 
2299 void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2300 				 struct extent_state *orig, u64 split)
2301 {
2302 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2303 	u64 size;
2304 
2305 	/* not delalloc, ignore it */
2306 	if (!(orig->state & EXTENT_DELALLOC))
2307 		return;
2308 
2309 	size = orig->end - orig->start + 1;
2310 	if (size > fs_info->max_extent_size) {
2311 		u32 num_extents;
2312 		u64 new_size;
2313 
2314 		/*
2315 		 * See the explanation in btrfs_merge_delalloc_extent, the same
2316 		 * applies here, just in reverse.
2317 		 */
2318 		new_size = orig->end - split + 1;
2319 		num_extents = count_max_extents(fs_info, new_size);
2320 		new_size = split - orig->start;
2321 		num_extents += count_max_extents(fs_info, new_size);
2322 		if (count_max_extents(fs_info, size) >= num_extents)
2323 			return;
2324 	}
2325 
2326 	spin_lock(&inode->lock);
2327 	btrfs_mod_outstanding_extents(inode, 1);
2328 	spin_unlock(&inode->lock);
2329 }
2330 
2331 /*
2332  * Handle merged delayed allocation extents so we can keep track of new extents
2333  * that are just merged onto old extents, such as when we are doing sequential
2334  * writes, so we can properly account for the metadata space we'll need.
2335  */
2336 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2337 				 struct extent_state *other)
2338 {
2339 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2340 	u64 new_size, old_size;
2341 	u32 num_extents;
2342 
2343 	/* not delalloc, ignore it */
2344 	if (!(other->state & EXTENT_DELALLOC))
2345 		return;
2346 
2347 	if (new->start > other->start)
2348 		new_size = new->end - other->start + 1;
2349 	else
2350 		new_size = other->end - new->start + 1;
2351 
2352 	/* we're not bigger than the max, unreserve the space and go */
2353 	if (new_size <= fs_info->max_extent_size) {
2354 		spin_lock(&inode->lock);
2355 		btrfs_mod_outstanding_extents(inode, -1);
2356 		spin_unlock(&inode->lock);
2357 		return;
2358 	}
2359 
2360 	/*
2361 	 * We have to add up either side to figure out how many extents were
2362 	 * accounted for before we merged into one big extent.  If the number of
2363 	 * extents we accounted for is <= the amount we need for the new range
2364 	 * then we can return, otherwise drop.  Think of it like this
2365 	 *
2366 	 * [ 4k][MAX_SIZE]
2367 	 *
2368 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2369 	 * need 2 outstanding extents, on one side we have 1 and the other side
2370 	 * we have 1 so they are == and we can return.  But in this case
2371 	 *
2372 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2373 	 *
2374 	 * Each range on their own accounts for 2 extents, but merged together
2375 	 * they are only 3 extents worth of accounting, so we need to drop in
2376 	 * this case.
2377 	 */
2378 	old_size = other->end - other->start + 1;
2379 	num_extents = count_max_extents(fs_info, old_size);
2380 	old_size = new->end - new->start + 1;
2381 	num_extents += count_max_extents(fs_info, old_size);
2382 	if (count_max_extents(fs_info, new_size) >= num_extents)
2383 		return;
2384 
2385 	spin_lock(&inode->lock);
2386 	btrfs_mod_outstanding_extents(inode, -1);
2387 	spin_unlock(&inode->lock);
2388 }
2389 
2390 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
2391 				      struct btrfs_inode *inode)
2392 {
2393 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2394 
2395 	spin_lock(&root->delalloc_lock);
2396 	if (list_empty(&inode->delalloc_inodes)) {
2397 		list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2398 		set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags);
2399 		root->nr_delalloc_inodes++;
2400 		if (root->nr_delalloc_inodes == 1) {
2401 			spin_lock(&fs_info->delalloc_root_lock);
2402 			BUG_ON(!list_empty(&root->delalloc_root));
2403 			list_add_tail(&root->delalloc_root,
2404 				      &fs_info->delalloc_roots);
2405 			spin_unlock(&fs_info->delalloc_root_lock);
2406 		}
2407 	}
2408 	spin_unlock(&root->delalloc_lock);
2409 }
2410 
2411 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
2412 				struct btrfs_inode *inode)
2413 {
2414 	struct btrfs_fs_info *fs_info = root->fs_info;
2415 
2416 	if (!list_empty(&inode->delalloc_inodes)) {
2417 		list_del_init(&inode->delalloc_inodes);
2418 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2419 			  &inode->runtime_flags);
2420 		root->nr_delalloc_inodes--;
2421 		if (!root->nr_delalloc_inodes) {
2422 			ASSERT(list_empty(&root->delalloc_inodes));
2423 			spin_lock(&fs_info->delalloc_root_lock);
2424 			BUG_ON(list_empty(&root->delalloc_root));
2425 			list_del_init(&root->delalloc_root);
2426 			spin_unlock(&fs_info->delalloc_root_lock);
2427 		}
2428 	}
2429 }
2430 
2431 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
2432 				     struct btrfs_inode *inode)
2433 {
2434 	spin_lock(&root->delalloc_lock);
2435 	__btrfs_del_delalloc_inode(root, inode);
2436 	spin_unlock(&root->delalloc_lock);
2437 }
2438 
2439 /*
2440  * Properly track delayed allocation bytes in the inode and to maintain the
2441  * list of inodes that have pending delalloc work to be done.
2442  */
2443 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2444 			       u32 bits)
2445 {
2446 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2447 
2448 	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2449 		WARN_ON(1);
2450 	/*
2451 	 * set_bit and clear bit hooks normally require _irqsave/restore
2452 	 * but in this case, we are only testing for the DELALLOC
2453 	 * bit, which is only set or cleared with irqs on
2454 	 */
2455 	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2456 		struct btrfs_root *root = inode->root;
2457 		u64 len = state->end + 1 - state->start;
2458 		u32 num_extents = count_max_extents(fs_info, len);
2459 		bool do_list = !btrfs_is_free_space_inode(inode);
2460 
2461 		spin_lock(&inode->lock);
2462 		btrfs_mod_outstanding_extents(inode, num_extents);
2463 		spin_unlock(&inode->lock);
2464 
2465 		/* For sanity tests */
2466 		if (btrfs_is_testing(fs_info))
2467 			return;
2468 
2469 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2470 					 fs_info->delalloc_batch);
2471 		spin_lock(&inode->lock);
2472 		inode->delalloc_bytes += len;
2473 		if (bits & EXTENT_DEFRAG)
2474 			inode->defrag_bytes += len;
2475 		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2476 					 &inode->runtime_flags))
2477 			btrfs_add_delalloc_inodes(root, inode);
2478 		spin_unlock(&inode->lock);
2479 	}
2480 
2481 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2482 	    (bits & EXTENT_DELALLOC_NEW)) {
2483 		spin_lock(&inode->lock);
2484 		inode->new_delalloc_bytes += state->end + 1 - state->start;
2485 		spin_unlock(&inode->lock);
2486 	}
2487 }
2488 
2489 /*
2490  * Once a range is no longer delalloc this function ensures that proper
2491  * accounting happens.
2492  */
2493 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2494 				 struct extent_state *state, u32 bits)
2495 {
2496 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2497 	u64 len = state->end + 1 - state->start;
2498 	u32 num_extents = count_max_extents(fs_info, len);
2499 
2500 	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2501 		spin_lock(&inode->lock);
2502 		inode->defrag_bytes -= len;
2503 		spin_unlock(&inode->lock);
2504 	}
2505 
2506 	/*
2507 	 * set_bit and clear bit hooks normally require _irqsave/restore
2508 	 * but in this case, we are only testing for the DELALLOC
2509 	 * bit, which is only set or cleared with irqs on
2510 	 */
2511 	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2512 		struct btrfs_root *root = inode->root;
2513 		bool do_list = !btrfs_is_free_space_inode(inode);
2514 
2515 		spin_lock(&inode->lock);
2516 		btrfs_mod_outstanding_extents(inode, -num_extents);
2517 		spin_unlock(&inode->lock);
2518 
2519 		/*
2520 		 * We don't reserve metadata space for space cache inodes so we
2521 		 * don't need to call delalloc_release_metadata if there is an
2522 		 * error.
2523 		 */
2524 		if (bits & EXTENT_CLEAR_META_RESV &&
2525 		    root != fs_info->tree_root)
2526 			btrfs_delalloc_release_metadata(inode, len, false);
2527 
2528 		/* For sanity tests. */
2529 		if (btrfs_is_testing(fs_info))
2530 			return;
2531 
2532 		if (!btrfs_is_data_reloc_root(root) &&
2533 		    do_list && !(state->state & EXTENT_NORESERVE) &&
2534 		    (bits & EXTENT_CLEAR_DATA_RESV))
2535 			btrfs_free_reserved_data_space_noquota(fs_info, len);
2536 
2537 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2538 					 fs_info->delalloc_batch);
2539 		spin_lock(&inode->lock);
2540 		inode->delalloc_bytes -= len;
2541 		if (do_list && inode->delalloc_bytes == 0 &&
2542 		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
2543 					&inode->runtime_flags))
2544 			btrfs_del_delalloc_inode(root, inode);
2545 		spin_unlock(&inode->lock);
2546 	}
2547 
2548 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2549 	    (bits & EXTENT_DELALLOC_NEW)) {
2550 		spin_lock(&inode->lock);
2551 		ASSERT(inode->new_delalloc_bytes >= len);
2552 		inode->new_delalloc_bytes -= len;
2553 		if (bits & EXTENT_ADD_INODE_BYTES)
2554 			inode_add_bytes(&inode->vfs_inode, len);
2555 		spin_unlock(&inode->lock);
2556 	}
2557 }
2558 
2559 static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
2560 					struct btrfs_ordered_extent *ordered)
2561 {
2562 	u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
2563 	u64 len = bbio->bio.bi_iter.bi_size;
2564 	struct btrfs_ordered_extent *new;
2565 	int ret;
2566 
2567 	/* Must always be called for the beginning of an ordered extent. */
2568 	if (WARN_ON_ONCE(start != ordered->disk_bytenr))
2569 		return -EINVAL;
2570 
2571 	/* No need to split if the ordered extent covers the entire bio. */
2572 	if (ordered->disk_num_bytes == len) {
2573 		refcount_inc(&ordered->refs);
2574 		bbio->ordered = ordered;
2575 		return 0;
2576 	}
2577 
2578 	/*
2579 	 * Don't split the extent_map for NOCOW extents, as we're writing into
2580 	 * a pre-existing one.
2581 	 */
2582 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
2583 		ret = split_extent_map(bbio->inode, bbio->file_offset,
2584 				       ordered->num_bytes, len,
2585 				       ordered->disk_bytenr);
2586 		if (ret)
2587 			return ret;
2588 	}
2589 
2590 	new = btrfs_split_ordered_extent(ordered, len);
2591 	if (IS_ERR(new))
2592 		return PTR_ERR(new);
2593 	bbio->ordered = new;
2594 	return 0;
2595 }
2596 
2597 /*
2598  * given a list of ordered sums record them in the inode.  This happens
2599  * at IO completion time based on sums calculated at bio submission time.
2600  */
2601 static int add_pending_csums(struct btrfs_trans_handle *trans,
2602 			     struct list_head *list)
2603 {
2604 	struct btrfs_ordered_sum *sum;
2605 	struct btrfs_root *csum_root = NULL;
2606 	int ret;
2607 
2608 	list_for_each_entry(sum, list, list) {
2609 		trans->adding_csums = true;
2610 		if (!csum_root)
2611 			csum_root = btrfs_csum_root(trans->fs_info,
2612 						    sum->logical);
2613 		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2614 		trans->adding_csums = false;
2615 		if (ret)
2616 			return ret;
2617 	}
2618 	return 0;
2619 }
2620 
2621 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2622 					 const u64 start,
2623 					 const u64 len,
2624 					 struct extent_state **cached_state)
2625 {
2626 	u64 search_start = start;
2627 	const u64 end = start + len - 1;
2628 
2629 	while (search_start < end) {
2630 		const u64 search_len = end - search_start + 1;
2631 		struct extent_map *em;
2632 		u64 em_len;
2633 		int ret = 0;
2634 
2635 		em = btrfs_get_extent(inode, NULL, 0, search_start, search_len);
2636 		if (IS_ERR(em))
2637 			return PTR_ERR(em);
2638 
2639 		if (em->block_start != EXTENT_MAP_HOLE)
2640 			goto next;
2641 
2642 		em_len = em->len;
2643 		if (em->start < search_start)
2644 			em_len -= search_start - em->start;
2645 		if (em_len > search_len)
2646 			em_len = search_len;
2647 
2648 		ret = set_extent_bit(&inode->io_tree, search_start,
2649 				     search_start + em_len - 1,
2650 				     EXTENT_DELALLOC_NEW, cached_state);
2651 next:
2652 		search_start = extent_map_end(em);
2653 		free_extent_map(em);
2654 		if (ret)
2655 			return ret;
2656 	}
2657 	return 0;
2658 }
2659 
2660 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2661 			      unsigned int extra_bits,
2662 			      struct extent_state **cached_state)
2663 {
2664 	WARN_ON(PAGE_ALIGNED(end));
2665 
2666 	if (start >= i_size_read(&inode->vfs_inode) &&
2667 	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2668 		/*
2669 		 * There can't be any extents following eof in this case so just
2670 		 * set the delalloc new bit for the range directly.
2671 		 */
2672 		extra_bits |= EXTENT_DELALLOC_NEW;
2673 	} else {
2674 		int ret;
2675 
2676 		ret = btrfs_find_new_delalloc_bytes(inode, start,
2677 						    end + 1 - start,
2678 						    cached_state);
2679 		if (ret)
2680 			return ret;
2681 	}
2682 
2683 	return set_extent_bit(&inode->io_tree, start, end,
2684 			      EXTENT_DELALLOC | extra_bits, cached_state);
2685 }
2686 
2687 /* see btrfs_writepage_start_hook for details on why this is required */
2688 struct btrfs_writepage_fixup {
2689 	struct page *page;
2690 	struct btrfs_inode *inode;
2691 	struct btrfs_work work;
2692 };
2693 
2694 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2695 {
2696 	struct btrfs_writepage_fixup *fixup =
2697 		container_of(work, struct btrfs_writepage_fixup, work);
2698 	struct btrfs_ordered_extent *ordered;
2699 	struct extent_state *cached_state = NULL;
2700 	struct extent_changeset *data_reserved = NULL;
2701 	struct page *page = fixup->page;
2702 	struct btrfs_inode *inode = fixup->inode;
2703 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2704 	u64 page_start = page_offset(page);
2705 	u64 page_end = page_offset(page) + PAGE_SIZE - 1;
2706 	int ret = 0;
2707 	bool free_delalloc_space = true;
2708 
2709 	/*
2710 	 * This is similar to page_mkwrite, we need to reserve the space before
2711 	 * we take the page lock.
2712 	 */
2713 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2714 					   PAGE_SIZE);
2715 again:
2716 	lock_page(page);
2717 
2718 	/*
2719 	 * Before we queued this fixup, we took a reference on the page.
2720 	 * page->mapping may go NULL, but it shouldn't be moved to a different
2721 	 * address space.
2722 	 */
2723 	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2724 		/*
2725 		 * Unfortunately this is a little tricky, either
2726 		 *
2727 		 * 1) We got here and our page had already been dealt with and
2728 		 *    we reserved our space, thus ret == 0, so we need to just
2729 		 *    drop our space reservation and bail.  This can happen the
2730 		 *    first time we come into the fixup worker, or could happen
2731 		 *    while waiting for the ordered extent.
2732 		 * 2) Our page was already dealt with, but we happened to get an
2733 		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2734 		 *    this case we obviously don't have anything to release, but
2735 		 *    because the page was already dealt with we don't want to
2736 		 *    mark the page with an error, so make sure we're resetting
2737 		 *    ret to 0.  This is why we have this check _before_ the ret
2738 		 *    check, because we do not want to have a surprise ENOSPC
2739 		 *    when the page was already properly dealt with.
2740 		 */
2741 		if (!ret) {
2742 			btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2743 			btrfs_delalloc_release_space(inode, data_reserved,
2744 						     page_start, PAGE_SIZE,
2745 						     true);
2746 		}
2747 		ret = 0;
2748 		goto out_page;
2749 	}
2750 
2751 	/*
2752 	 * We can't mess with the page state unless it is locked, so now that
2753 	 * it is locked bail if we failed to make our space reservation.
2754 	 */
2755 	if (ret)
2756 		goto out_page;
2757 
2758 	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2759 
2760 	/* already ordered? We're done */
2761 	if (PageOrdered(page))
2762 		goto out_reserved;
2763 
2764 	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2765 	if (ordered) {
2766 		unlock_extent(&inode->io_tree, page_start, page_end,
2767 			      &cached_state);
2768 		unlock_page(page);
2769 		btrfs_start_ordered_extent(ordered);
2770 		btrfs_put_ordered_extent(ordered);
2771 		goto again;
2772 	}
2773 
2774 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2775 					&cached_state);
2776 	if (ret)
2777 		goto out_reserved;
2778 
2779 	/*
2780 	 * Everything went as planned, we're now the owner of a dirty page with
2781 	 * delayed allocation bits set and space reserved for our COW
2782 	 * destination.
2783 	 *
2784 	 * The page was dirty when we started, nothing should have cleaned it.
2785 	 */
2786 	BUG_ON(!PageDirty(page));
2787 	free_delalloc_space = false;
2788 out_reserved:
2789 	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2790 	if (free_delalloc_space)
2791 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2792 					     PAGE_SIZE, true);
2793 	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2794 out_page:
2795 	if (ret) {
2796 		/*
2797 		 * We hit ENOSPC or other errors.  Update the mapping and page
2798 		 * to reflect the errors and clean the page.
2799 		 */
2800 		mapping_set_error(page->mapping, ret);
2801 		btrfs_mark_ordered_io_finished(inode, page, page_start,
2802 					       PAGE_SIZE, !ret);
2803 		clear_page_dirty_for_io(page);
2804 	}
2805 	btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);
2806 	unlock_page(page);
2807 	put_page(page);
2808 	kfree(fixup);
2809 	extent_changeset_free(data_reserved);
2810 	/*
2811 	 * As a precaution, do a delayed iput in case it would be the last iput
2812 	 * that could need flushing space. Recursing back to fixup worker would
2813 	 * deadlock.
2814 	 */
2815 	btrfs_add_delayed_iput(inode);
2816 }
2817 
2818 /*
2819  * There are a few paths in the higher layers of the kernel that directly
2820  * set the page dirty bit without asking the filesystem if it is a
2821  * good idea.  This causes problems because we want to make sure COW
2822  * properly happens and the data=ordered rules are followed.
2823  *
2824  * In our case any range that doesn't have the ORDERED bit set
2825  * hasn't been properly setup for IO.  We kick off an async process
2826  * to fix it up.  The async helper will wait for ordered extents, set
2827  * the delalloc bit and make it safe to write the page.
2828  */
2829 int btrfs_writepage_cow_fixup(struct page *page)
2830 {
2831 	struct inode *inode = page->mapping->host;
2832 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2833 	struct btrfs_writepage_fixup *fixup;
2834 
2835 	/* This page has ordered extent covering it already */
2836 	if (PageOrdered(page))
2837 		return 0;
2838 
2839 	/*
2840 	 * PageChecked is set below when we create a fixup worker for this page,
2841 	 * don't try to create another one if we're already PageChecked()
2842 	 *
2843 	 * The extent_io writepage code will redirty the page if we send back
2844 	 * EAGAIN.
2845 	 */
2846 	if (PageChecked(page))
2847 		return -EAGAIN;
2848 
2849 	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2850 	if (!fixup)
2851 		return -EAGAIN;
2852 
2853 	/*
2854 	 * We are already holding a reference to this inode from
2855 	 * write_cache_pages.  We need to hold it because the space reservation
2856 	 * takes place outside of the page lock, and we can't trust
2857 	 * page->mapping outside of the page lock.
2858 	 */
2859 	ihold(inode);
2860 	btrfs_folio_set_checked(fs_info, page_folio(page), page_offset(page), PAGE_SIZE);
2861 	get_page(page);
2862 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
2863 	fixup->page = page;
2864 	fixup->inode = BTRFS_I(inode);
2865 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2866 
2867 	return -EAGAIN;
2868 }
2869 
2870 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2871 				       struct btrfs_inode *inode, u64 file_pos,
2872 				       struct btrfs_file_extent_item *stack_fi,
2873 				       const bool update_inode_bytes,
2874 				       u64 qgroup_reserved)
2875 {
2876 	struct btrfs_root *root = inode->root;
2877 	const u64 sectorsize = root->fs_info->sectorsize;
2878 	struct btrfs_path *path;
2879 	struct extent_buffer *leaf;
2880 	struct btrfs_key ins;
2881 	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2882 	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2883 	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
2884 	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2885 	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2886 	struct btrfs_drop_extents_args drop_args = { 0 };
2887 	int ret;
2888 
2889 	path = btrfs_alloc_path();
2890 	if (!path)
2891 		return -ENOMEM;
2892 
2893 	/*
2894 	 * we may be replacing one extent in the tree with another.
2895 	 * The new extent is pinned in the extent map, and we don't want
2896 	 * to drop it from the cache until it is completely in the btree.
2897 	 *
2898 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
2899 	 * the caller is expected to unpin it and allow it to be merged
2900 	 * with the others.
2901 	 */
2902 	drop_args.path = path;
2903 	drop_args.start = file_pos;
2904 	drop_args.end = file_pos + num_bytes;
2905 	drop_args.replace_extent = true;
2906 	drop_args.extent_item_size = sizeof(*stack_fi);
2907 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2908 	if (ret)
2909 		goto out;
2910 
2911 	if (!drop_args.extent_inserted) {
2912 		ins.objectid = btrfs_ino(inode);
2913 		ins.offset = file_pos;
2914 		ins.type = BTRFS_EXTENT_DATA_KEY;
2915 
2916 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
2917 					      sizeof(*stack_fi));
2918 		if (ret)
2919 			goto out;
2920 	}
2921 	leaf = path->nodes[0];
2922 	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
2923 	write_extent_buffer(leaf, stack_fi,
2924 			btrfs_item_ptr_offset(leaf, path->slots[0]),
2925 			sizeof(struct btrfs_file_extent_item));
2926 
2927 	btrfs_mark_buffer_dirty(trans, leaf);
2928 	btrfs_release_path(path);
2929 
2930 	/*
2931 	 * If we dropped an inline extent here, we know the range where it is
2932 	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
2933 	 * number of bytes only for that range containing the inline extent.
2934 	 * The remaining of the range will be processed when clearning the
2935 	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
2936 	 */
2937 	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
2938 		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
2939 
2940 		inline_size = drop_args.bytes_found - inline_size;
2941 		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
2942 		drop_args.bytes_found -= inline_size;
2943 		num_bytes -= sectorsize;
2944 	}
2945 
2946 	if (update_inode_bytes)
2947 		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
2948 
2949 	ins.objectid = disk_bytenr;
2950 	ins.offset = disk_num_bytes;
2951 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2952 
2953 	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
2954 	if (ret)
2955 		goto out;
2956 
2957 	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
2958 					       file_pos - offset,
2959 					       qgroup_reserved, &ins);
2960 out:
2961 	btrfs_free_path(path);
2962 
2963 	return ret;
2964 }
2965 
2966 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
2967 					 u64 start, u64 len)
2968 {
2969 	struct btrfs_block_group *cache;
2970 
2971 	cache = btrfs_lookup_block_group(fs_info, start);
2972 	ASSERT(cache);
2973 
2974 	spin_lock(&cache->lock);
2975 	cache->delalloc_bytes -= len;
2976 	spin_unlock(&cache->lock);
2977 
2978 	btrfs_put_block_group(cache);
2979 }
2980 
2981 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
2982 					     struct btrfs_ordered_extent *oe)
2983 {
2984 	struct btrfs_file_extent_item stack_fi;
2985 	bool update_inode_bytes;
2986 	u64 num_bytes = oe->num_bytes;
2987 	u64 ram_bytes = oe->ram_bytes;
2988 
2989 	memset(&stack_fi, 0, sizeof(stack_fi));
2990 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
2991 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
2992 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
2993 						   oe->disk_num_bytes);
2994 	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
2995 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) {
2996 		num_bytes = oe->truncated_len;
2997 		ram_bytes = num_bytes;
2998 	}
2999 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3000 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3001 	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3002 	/* Encryption and other encoding is reserved and all 0 */
3003 
3004 	/*
3005 	 * For delalloc, when completing an ordered extent we update the inode's
3006 	 * bytes when clearing the range in the inode's io tree, so pass false
3007 	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3008 	 * except if the ordered extent was truncated.
3009 	 */
3010 	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3011 			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3012 			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3013 
3014 	return insert_reserved_file_extent(trans, BTRFS_I(oe->inode),
3015 					   oe->file_offset, &stack_fi,
3016 					   update_inode_bytes, oe->qgroup_rsv);
3017 }
3018 
3019 /*
3020  * As ordered data IO finishes, this gets called so we can finish
3021  * an ordered extent if the range of bytes in the file it covers are
3022  * fully written.
3023  */
3024 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3025 {
3026 	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
3027 	struct btrfs_root *root = inode->root;
3028 	struct btrfs_fs_info *fs_info = root->fs_info;
3029 	struct btrfs_trans_handle *trans = NULL;
3030 	struct extent_io_tree *io_tree = &inode->io_tree;
3031 	struct extent_state *cached_state = NULL;
3032 	u64 start, end;
3033 	int compress_type = 0;
3034 	int ret = 0;
3035 	u64 logical_len = ordered_extent->num_bytes;
3036 	bool freespace_inode;
3037 	bool truncated = false;
3038 	bool clear_reserved_extent = true;
3039 	unsigned int clear_bits = EXTENT_DEFRAG;
3040 
3041 	start = ordered_extent->file_offset;
3042 	end = start + ordered_extent->num_bytes - 1;
3043 
3044 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3045 	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3046 	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3047 	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3048 		clear_bits |= EXTENT_DELALLOC_NEW;
3049 
3050 	freespace_inode = btrfs_is_free_space_inode(inode);
3051 	if (!freespace_inode)
3052 		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3053 
3054 	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3055 		ret = -EIO;
3056 		goto out;
3057 	}
3058 
3059 	if (btrfs_is_zoned(fs_info))
3060 		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3061 					ordered_extent->disk_num_bytes);
3062 
3063 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3064 		truncated = true;
3065 		logical_len = ordered_extent->truncated_len;
3066 		/* Truncated the entire extent, don't bother adding */
3067 		if (!logical_len)
3068 			goto out;
3069 	}
3070 
3071 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3072 		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
3073 
3074 		btrfs_inode_safe_disk_i_size_write(inode, 0);
3075 		if (freespace_inode)
3076 			trans = btrfs_join_transaction_spacecache(root);
3077 		else
3078 			trans = btrfs_join_transaction(root);
3079 		if (IS_ERR(trans)) {
3080 			ret = PTR_ERR(trans);
3081 			trans = NULL;
3082 			goto out;
3083 		}
3084 		trans->block_rsv = &inode->block_rsv;
3085 		ret = btrfs_update_inode_fallback(trans, inode);
3086 		if (ret) /* -ENOMEM or corruption */
3087 			btrfs_abort_transaction(trans, ret);
3088 		goto out;
3089 	}
3090 
3091 	clear_bits |= EXTENT_LOCKED;
3092 	lock_extent(io_tree, start, end, &cached_state);
3093 
3094 	if (freespace_inode)
3095 		trans = btrfs_join_transaction_spacecache(root);
3096 	else
3097 		trans = btrfs_join_transaction(root);
3098 	if (IS_ERR(trans)) {
3099 		ret = PTR_ERR(trans);
3100 		trans = NULL;
3101 		goto out;
3102 	}
3103 
3104 	trans->block_rsv = &inode->block_rsv;
3105 
3106 	ret = btrfs_insert_raid_extent(trans, ordered_extent);
3107 	if (ret)
3108 		goto out;
3109 
3110 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3111 		compress_type = ordered_extent->compress_type;
3112 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3113 		BUG_ON(compress_type);
3114 		ret = btrfs_mark_extent_written(trans, inode,
3115 						ordered_extent->file_offset,
3116 						ordered_extent->file_offset +
3117 						logical_len);
3118 		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3119 						  ordered_extent->disk_num_bytes);
3120 	} else {
3121 		BUG_ON(root == fs_info->tree_root);
3122 		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3123 		if (!ret) {
3124 			clear_reserved_extent = false;
3125 			btrfs_release_delalloc_bytes(fs_info,
3126 						ordered_extent->disk_bytenr,
3127 						ordered_extent->disk_num_bytes);
3128 		}
3129 	}
3130 	unpin_extent_cache(inode, ordered_extent->file_offset,
3131 			   ordered_extent->num_bytes, trans->transid);
3132 	if (ret < 0) {
3133 		btrfs_abort_transaction(trans, ret);
3134 		goto out;
3135 	}
3136 
3137 	ret = add_pending_csums(trans, &ordered_extent->list);
3138 	if (ret) {
3139 		btrfs_abort_transaction(trans, ret);
3140 		goto out;
3141 	}
3142 
3143 	/*
3144 	 * If this is a new delalloc range, clear its new delalloc flag to
3145 	 * update the inode's number of bytes. This needs to be done first
3146 	 * before updating the inode item.
3147 	 */
3148 	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3149 	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3150 		clear_extent_bit(&inode->io_tree, start, end,
3151 				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3152 				 &cached_state);
3153 
3154 	btrfs_inode_safe_disk_i_size_write(inode, 0);
3155 	ret = btrfs_update_inode_fallback(trans, inode);
3156 	if (ret) { /* -ENOMEM or corruption */
3157 		btrfs_abort_transaction(trans, ret);
3158 		goto out;
3159 	}
3160 	ret = 0;
3161 out:
3162 	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3163 			 &cached_state);
3164 
3165 	if (trans)
3166 		btrfs_end_transaction(trans);
3167 
3168 	if (ret || truncated) {
3169 		u64 unwritten_start = start;
3170 
3171 		/*
3172 		 * If we failed to finish this ordered extent for any reason we
3173 		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3174 		 * extent, and mark the inode with the error if it wasn't
3175 		 * already set.  Any error during writeback would have already
3176 		 * set the mapping error, so we need to set it if we're the ones
3177 		 * marking this ordered extent as failed.
3178 		 */
3179 		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
3180 					     &ordered_extent->flags))
3181 			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
3182 
3183 		if (truncated)
3184 			unwritten_start += logical_len;
3185 		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3186 
3187 		/* Drop extent maps for the part of the extent we didn't write. */
3188 		btrfs_drop_extent_map_range(inode, unwritten_start, end, false);
3189 
3190 		/*
3191 		 * If the ordered extent had an IOERR or something else went
3192 		 * wrong we need to return the space for this ordered extent
3193 		 * back to the allocator.  We only free the extent in the
3194 		 * truncated case if we didn't write out the extent at all.
3195 		 *
3196 		 * If we made it past insert_reserved_file_extent before we
3197 		 * errored out then we don't need to do this as the accounting
3198 		 * has already been done.
3199 		 */
3200 		if ((ret || !logical_len) &&
3201 		    clear_reserved_extent &&
3202 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3203 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3204 			/*
3205 			 * Discard the range before returning it back to the
3206 			 * free space pool
3207 			 */
3208 			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3209 				btrfs_discard_extent(fs_info,
3210 						ordered_extent->disk_bytenr,
3211 						ordered_extent->disk_num_bytes,
3212 						NULL);
3213 			btrfs_free_reserved_extent(fs_info,
3214 					ordered_extent->disk_bytenr,
3215 					ordered_extent->disk_num_bytes, 1);
3216 			/*
3217 			 * Actually free the qgroup rsv which was released when
3218 			 * the ordered extent was created.
3219 			 */
3220 			btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
3221 						  ordered_extent->qgroup_rsv,
3222 						  BTRFS_QGROUP_RSV_DATA);
3223 		}
3224 	}
3225 
3226 	/*
3227 	 * This needs to be done to make sure anybody waiting knows we are done
3228 	 * updating everything for this ordered extent.
3229 	 */
3230 	btrfs_remove_ordered_extent(inode, ordered_extent);
3231 
3232 	/* once for us */
3233 	btrfs_put_ordered_extent(ordered_extent);
3234 	/* once for the tree */
3235 	btrfs_put_ordered_extent(ordered_extent);
3236 
3237 	return ret;
3238 }
3239 
3240 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3241 {
3242 	if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
3243 	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3244 	    list_empty(&ordered->bioc_list))
3245 		btrfs_finish_ordered_zoned(ordered);
3246 	return btrfs_finish_one_ordered(ordered);
3247 }
3248 
3249 /*
3250  * Verify the checksum for a single sector without any extra action that depend
3251  * on the type of I/O.
3252  */
3253 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
3254 			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
3255 {
3256 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3257 	char *kaddr;
3258 
3259 	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);
3260 
3261 	shash->tfm = fs_info->csum_shash;
3262 
3263 	kaddr = kmap_local_page(page) + pgoff;
3264 	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
3265 	kunmap_local(kaddr);
3266 
3267 	if (memcmp(csum, csum_expected, fs_info->csum_size))
3268 		return -EIO;
3269 	return 0;
3270 }
3271 
3272 /*
3273  * Verify the checksum of a single data sector.
3274  *
3275  * @bbio:	btrfs_io_bio which contains the csum
3276  * @dev:	device the sector is on
3277  * @bio_offset:	offset to the beginning of the bio (in bytes)
3278  * @bv:		bio_vec to check
3279  *
3280  * Check if the checksum on a data block is valid.  When a checksum mismatch is
3281  * detected, report the error and fill the corrupted range with zero.
3282  *
3283  * Return %true if the sector is ok or had no checksum to start with, else %false.
3284  */
3285 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
3286 			u32 bio_offset, struct bio_vec *bv)
3287 {
3288 	struct btrfs_inode *inode = bbio->inode;
3289 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3290 	u64 file_offset = bbio->file_offset + bio_offset;
3291 	u64 end = file_offset + bv->bv_len - 1;
3292 	u8 *csum_expected;
3293 	u8 csum[BTRFS_CSUM_SIZE];
3294 
3295 	ASSERT(bv->bv_len == fs_info->sectorsize);
3296 
3297 	if (!bbio->csum)
3298 		return true;
3299 
3300 	if (btrfs_is_data_reloc_root(inode->root) &&
3301 	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3302 			   NULL)) {
3303 		/* Skip the range without csum for data reloc inode */
3304 		clear_extent_bits(&inode->io_tree, file_offset, end,
3305 				  EXTENT_NODATASUM);
3306 		return true;
3307 	}
3308 
3309 	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
3310 				fs_info->csum_size;
3311 	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
3312 				    csum_expected))
3313 		goto zeroit;
3314 	return true;
3315 
3316 zeroit:
3317 	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
3318 				    bbio->mirror_num);
3319 	if (dev)
3320 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
3321 	memzero_bvec(bv);
3322 	return false;
3323 }
3324 
3325 /*
3326  * Perform a delayed iput on @inode.
3327  *
3328  * @inode: The inode we want to perform iput on
3329  *
3330  * This function uses the generic vfs_inode::i_count to track whether we should
3331  * just decrement it (in case it's > 1) or if this is the last iput then link
3332  * the inode to the delayed iput machinery. Delayed iputs are processed at
3333  * transaction commit time/superblock commit/cleaner kthread.
3334  */
3335 void btrfs_add_delayed_iput(struct btrfs_inode *inode)
3336 {
3337 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3338 	unsigned long flags;
3339 
3340 	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
3341 		return;
3342 
3343 	atomic_inc(&fs_info->nr_delayed_iputs);
3344 	/*
3345 	 * Need to be irq safe here because we can be called from either an irq
3346 	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
3347 	 * context.
3348 	 */
3349 	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
3350 	ASSERT(list_empty(&inode->delayed_iput));
3351 	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
3352 	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
3353 	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3354 		wake_up_process(fs_info->cleaner_kthread);
3355 }
3356 
3357 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3358 				    struct btrfs_inode *inode)
3359 {
3360 	list_del_init(&inode->delayed_iput);
3361 	spin_unlock_irq(&fs_info->delayed_iput_lock);
3362 	iput(&inode->vfs_inode);
3363 	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3364 		wake_up(&fs_info->delayed_iputs_wait);
3365 	spin_lock_irq(&fs_info->delayed_iput_lock);
3366 }
3367 
3368 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3369 				   struct btrfs_inode *inode)
3370 {
3371 	if (!list_empty(&inode->delayed_iput)) {
3372 		spin_lock_irq(&fs_info->delayed_iput_lock);
3373 		if (!list_empty(&inode->delayed_iput))
3374 			run_delayed_iput_locked(fs_info, inode);
3375 		spin_unlock_irq(&fs_info->delayed_iput_lock);
3376 	}
3377 }
3378 
3379 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3380 {
3381 	/*
3382 	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
3383 	 * calls btrfs_add_delayed_iput() and that needs to lock
3384 	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
3385 	 * prevent a deadlock.
3386 	 */
3387 	spin_lock_irq(&fs_info->delayed_iput_lock);
3388 	while (!list_empty(&fs_info->delayed_iputs)) {
3389 		struct btrfs_inode *inode;
3390 
3391 		inode = list_first_entry(&fs_info->delayed_iputs,
3392 				struct btrfs_inode, delayed_iput);
3393 		run_delayed_iput_locked(fs_info, inode);
3394 		if (need_resched()) {
3395 			spin_unlock_irq(&fs_info->delayed_iput_lock);
3396 			cond_resched();
3397 			spin_lock_irq(&fs_info->delayed_iput_lock);
3398 		}
3399 	}
3400 	spin_unlock_irq(&fs_info->delayed_iput_lock);
3401 }
3402 
3403 /*
3404  * Wait for flushing all delayed iputs
3405  *
3406  * @fs_info:  the filesystem
3407  *
3408  * This will wait on any delayed iputs that are currently running with KILLABLE
3409  * set.  Once they are all done running we will return, unless we are killed in
3410  * which case we return EINTR. This helps in user operations like fallocate etc
3411  * that might get blocked on the iputs.
3412  *
3413  * Return EINTR if we were killed, 0 if nothing's pending
3414  */
3415 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3416 {
3417 	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3418 			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3419 	if (ret)
3420 		return -EINTR;
3421 	return 0;
3422 }
3423 
3424 /*
3425  * This creates an orphan entry for the given inode in case something goes wrong
3426  * in the middle of an unlink.
3427  */
3428 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3429 		     struct btrfs_inode *inode)
3430 {
3431 	int ret;
3432 
3433 	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3434 	if (ret && ret != -EEXIST) {
3435 		btrfs_abort_transaction(trans, ret);
3436 		return ret;
3437 	}
3438 
3439 	return 0;
3440 }
3441 
3442 /*
3443  * We have done the delete so we can go ahead and remove the orphan item for
3444  * this particular inode.
3445  */
3446 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3447 			    struct btrfs_inode *inode)
3448 {
3449 	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3450 }
3451 
3452 /*
3453  * this cleans up any orphans that may be left on the list from the last use
3454  * of this root.
3455  */
3456 int btrfs_orphan_cleanup(struct btrfs_root *root)
3457 {
3458 	struct btrfs_fs_info *fs_info = root->fs_info;
3459 	struct btrfs_path *path;
3460 	struct extent_buffer *leaf;
3461 	struct btrfs_key key, found_key;
3462 	struct btrfs_trans_handle *trans;
3463 	struct inode *inode;
3464 	u64 last_objectid = 0;
3465 	int ret = 0, nr_unlink = 0;
3466 
3467 	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3468 		return 0;
3469 
3470 	path = btrfs_alloc_path();
3471 	if (!path) {
3472 		ret = -ENOMEM;
3473 		goto out;
3474 	}
3475 	path->reada = READA_BACK;
3476 
3477 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3478 	key.type = BTRFS_ORPHAN_ITEM_KEY;
3479 	key.offset = (u64)-1;
3480 
3481 	while (1) {
3482 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3483 		if (ret < 0)
3484 			goto out;
3485 
3486 		/*
3487 		 * if ret == 0 means we found what we were searching for, which
3488 		 * is weird, but possible, so only screw with path if we didn't
3489 		 * find the key and see if we have stuff that matches
3490 		 */
3491 		if (ret > 0) {
3492 			ret = 0;
3493 			if (path->slots[0] == 0)
3494 				break;
3495 			path->slots[0]--;
3496 		}
3497 
3498 		/* pull out the item */
3499 		leaf = path->nodes[0];
3500 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3501 
3502 		/* make sure the item matches what we want */
3503 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3504 			break;
3505 		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3506 			break;
3507 
3508 		/* release the path since we're done with it */
3509 		btrfs_release_path(path);
3510 
3511 		/*
3512 		 * this is where we are basically btrfs_lookup, without the
3513 		 * crossing root thing.  we store the inode number in the
3514 		 * offset of the orphan item.
3515 		 */
3516 
3517 		if (found_key.offset == last_objectid) {
3518 			/*
3519 			 * We found the same inode as before. This means we were
3520 			 * not able to remove its items via eviction triggered
3521 			 * by an iput(). A transaction abort may have happened,
3522 			 * due to -ENOSPC for example, so try to grab the error
3523 			 * that lead to a transaction abort, if any.
3524 			 */
3525 			btrfs_err(fs_info,
3526 				  "Error removing orphan entry, stopping orphan cleanup");
3527 			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
3528 			goto out;
3529 		}
3530 
3531 		last_objectid = found_key.offset;
3532 
3533 		found_key.objectid = found_key.offset;
3534 		found_key.type = BTRFS_INODE_ITEM_KEY;
3535 		found_key.offset = 0;
3536 		inode = btrfs_iget(fs_info->sb, last_objectid, root);
3537 		if (IS_ERR(inode)) {
3538 			ret = PTR_ERR(inode);
3539 			inode = NULL;
3540 			if (ret != -ENOENT)
3541 				goto out;
3542 		}
3543 
3544 		if (!inode && root == fs_info->tree_root) {
3545 			struct btrfs_root *dead_root;
3546 			int is_dead_root = 0;
3547 
3548 			/*
3549 			 * This is an orphan in the tree root. Currently these
3550 			 * could come from 2 sources:
3551 			 *  a) a root (snapshot/subvolume) deletion in progress
3552 			 *  b) a free space cache inode
3553 			 * We need to distinguish those two, as the orphan item
3554 			 * for a root must not get deleted before the deletion
3555 			 * of the snapshot/subvolume's tree completes.
3556 			 *
3557 			 * btrfs_find_orphan_roots() ran before us, which has
3558 			 * found all deleted roots and loaded them into
3559 			 * fs_info->fs_roots_radix. So here we can find if an
3560 			 * orphan item corresponds to a deleted root by looking
3561 			 * up the root from that radix tree.
3562 			 */
3563 
3564 			spin_lock(&fs_info->fs_roots_radix_lock);
3565 			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3566 							 (unsigned long)found_key.objectid);
3567 			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3568 				is_dead_root = 1;
3569 			spin_unlock(&fs_info->fs_roots_radix_lock);
3570 
3571 			if (is_dead_root) {
3572 				/* prevent this orphan from being found again */
3573 				key.offset = found_key.objectid - 1;
3574 				continue;
3575 			}
3576 
3577 		}
3578 
3579 		/*
3580 		 * If we have an inode with links, there are a couple of
3581 		 * possibilities:
3582 		 *
3583 		 * 1. We were halfway through creating fsverity metadata for the
3584 		 * file. In that case, the orphan item represents incomplete
3585 		 * fsverity metadata which must be cleaned up with
3586 		 * btrfs_drop_verity_items and deleting the orphan item.
3587 
3588 		 * 2. Old kernels (before v3.12) used to create an
3589 		 * orphan item for truncate indicating that there were possibly
3590 		 * extent items past i_size that needed to be deleted. In v3.12,
3591 		 * truncate was changed to update i_size in sync with the extent
3592 		 * items, but the (useless) orphan item was still created. Since
3593 		 * v4.18, we don't create the orphan item for truncate at all.
3594 		 *
3595 		 * So, this item could mean that we need to do a truncate, but
3596 		 * only if this filesystem was last used on a pre-v3.12 kernel
3597 		 * and was not cleanly unmounted. The odds of that are quite
3598 		 * slim, and it's a pain to do the truncate now, so just delete
3599 		 * the orphan item.
3600 		 *
3601 		 * It's also possible that this orphan item was supposed to be
3602 		 * deleted but wasn't. The inode number may have been reused,
3603 		 * but either way, we can delete the orphan item.
3604 		 */
3605 		if (!inode || inode->i_nlink) {
3606 			if (inode) {
3607 				ret = btrfs_drop_verity_items(BTRFS_I(inode));
3608 				iput(inode);
3609 				inode = NULL;
3610 				if (ret)
3611 					goto out;
3612 			}
3613 			trans = btrfs_start_transaction(root, 1);
3614 			if (IS_ERR(trans)) {
3615 				ret = PTR_ERR(trans);
3616 				goto out;
3617 			}
3618 			btrfs_debug(fs_info, "auto deleting %Lu",
3619 				    found_key.objectid);
3620 			ret = btrfs_del_orphan_item(trans, root,
3621 						    found_key.objectid);
3622 			btrfs_end_transaction(trans);
3623 			if (ret)
3624 				goto out;
3625 			continue;
3626 		}
3627 
3628 		nr_unlink++;
3629 
3630 		/* this will do delete_inode and everything for us */
3631 		iput(inode);
3632 	}
3633 	/* release the path since we're done with it */
3634 	btrfs_release_path(path);
3635 
3636 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3637 		trans = btrfs_join_transaction(root);
3638 		if (!IS_ERR(trans))
3639 			btrfs_end_transaction(trans);
3640 	}
3641 
3642 	if (nr_unlink)
3643 		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3644 
3645 out:
3646 	if (ret)
3647 		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3648 	btrfs_free_path(path);
3649 	return ret;
3650 }
3651 
3652 /*
3653  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3654  * don't find any xattrs, we know there can't be any acls.
3655  *
3656  * slot is the slot the inode is in, objectid is the objectid of the inode
3657  */
3658 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3659 					  int slot, u64 objectid,
3660 					  int *first_xattr_slot)
3661 {
3662 	u32 nritems = btrfs_header_nritems(leaf);
3663 	struct btrfs_key found_key;
3664 	static u64 xattr_access = 0;
3665 	static u64 xattr_default = 0;
3666 	int scanned = 0;
3667 
3668 	if (!xattr_access) {
3669 		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3670 					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3671 		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3672 					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3673 	}
3674 
3675 	slot++;
3676 	*first_xattr_slot = -1;
3677 	while (slot < nritems) {
3678 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3679 
3680 		/* we found a different objectid, there must not be acls */
3681 		if (found_key.objectid != objectid)
3682 			return 0;
3683 
3684 		/* we found an xattr, assume we've got an acl */
3685 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3686 			if (*first_xattr_slot == -1)
3687 				*first_xattr_slot = slot;
3688 			if (found_key.offset == xattr_access ||
3689 			    found_key.offset == xattr_default)
3690 				return 1;
3691 		}
3692 
3693 		/*
3694 		 * we found a key greater than an xattr key, there can't
3695 		 * be any acls later on
3696 		 */
3697 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3698 			return 0;
3699 
3700 		slot++;
3701 		scanned++;
3702 
3703 		/*
3704 		 * it goes inode, inode backrefs, xattrs, extents,
3705 		 * so if there are a ton of hard links to an inode there can
3706 		 * be a lot of backrefs.  Don't waste time searching too hard,
3707 		 * this is just an optimization
3708 		 */
3709 		if (scanned >= 8)
3710 			break;
3711 	}
3712 	/* we hit the end of the leaf before we found an xattr or
3713 	 * something larger than an xattr.  We have to assume the inode
3714 	 * has acls
3715 	 */
3716 	if (*first_xattr_slot == -1)
3717 		*first_xattr_slot = slot;
3718 	return 1;
3719 }
3720 
3721 /*
3722  * read an inode from the btree into the in-memory inode
3723  */
3724 static int btrfs_read_locked_inode(struct inode *inode,
3725 				   struct btrfs_path *in_path)
3726 {
3727 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3728 	struct btrfs_path *path = in_path;
3729 	struct extent_buffer *leaf;
3730 	struct btrfs_inode_item *inode_item;
3731 	struct btrfs_root *root = BTRFS_I(inode)->root;
3732 	struct btrfs_key location;
3733 	unsigned long ptr;
3734 	int maybe_acls;
3735 	u32 rdev;
3736 	int ret;
3737 	bool filled = false;
3738 	int first_xattr_slot;
3739 
3740 	ret = btrfs_fill_inode(inode, &rdev);
3741 	if (!ret)
3742 		filled = true;
3743 
3744 	if (!path) {
3745 		path = btrfs_alloc_path();
3746 		if (!path)
3747 			return -ENOMEM;
3748 	}
3749 
3750 	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3751 
3752 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3753 	if (ret) {
3754 		if (path != in_path)
3755 			btrfs_free_path(path);
3756 		return ret;
3757 	}
3758 
3759 	leaf = path->nodes[0];
3760 
3761 	if (filled)
3762 		goto cache_index;
3763 
3764 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3765 				    struct btrfs_inode_item);
3766 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3767 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3768 	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3769 	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3770 	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
3771 	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
3772 			round_up(i_size_read(inode), fs_info->sectorsize));
3773 
3774 	inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
3775 			btrfs_timespec_nsec(leaf, &inode_item->atime));
3776 
3777 	inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
3778 			btrfs_timespec_nsec(leaf, &inode_item->mtime));
3779 
3780 	inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
3781 			btrfs_timespec_nsec(leaf, &inode_item->ctime));
3782 
3783 	BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
3784 	BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
3785 
3786 	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3787 	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3788 	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3789 
3790 	inode_set_iversion_queried(inode,
3791 				   btrfs_inode_sequence(leaf, inode_item));
3792 	inode->i_generation = BTRFS_I(inode)->generation;
3793 	inode->i_rdev = 0;
3794 	rdev = btrfs_inode_rdev(leaf, inode_item);
3795 
3796 	BTRFS_I(inode)->index_cnt = (u64)-1;
3797 	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
3798 				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
3799 
3800 cache_index:
3801 	/*
3802 	 * If we were modified in the current generation and evicted from memory
3803 	 * and then re-read we need to do a full sync since we don't have any
3804 	 * idea about which extents were modified before we were evicted from
3805 	 * cache.
3806 	 *
3807 	 * This is required for both inode re-read from disk and delayed inode
3808 	 * in the delayed_nodes xarray.
3809 	 */
3810 	if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
3811 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3812 			&BTRFS_I(inode)->runtime_flags);
3813 
3814 	/*
3815 	 * We don't persist the id of the transaction where an unlink operation
3816 	 * against the inode was last made. So here we assume the inode might
3817 	 * have been evicted, and therefore the exact value of last_unlink_trans
3818 	 * lost, and set it to last_trans to avoid metadata inconsistencies
3819 	 * between the inode and its parent if the inode is fsync'ed and the log
3820 	 * replayed. For example, in the scenario:
3821 	 *
3822 	 * touch mydir/foo
3823 	 * ln mydir/foo mydir/bar
3824 	 * sync
3825 	 * unlink mydir/bar
3826 	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3827 	 * xfs_io -c fsync mydir/foo
3828 	 * <power failure>
3829 	 * mount fs, triggers fsync log replay
3830 	 *
3831 	 * We must make sure that when we fsync our inode foo we also log its
3832 	 * parent inode, otherwise after log replay the parent still has the
3833 	 * dentry with the "bar" name but our inode foo has a link count of 1
3834 	 * and doesn't have an inode ref with the name "bar" anymore.
3835 	 *
3836 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
3837 	 * but it guarantees correctness at the expense of occasional full
3838 	 * transaction commits on fsync if our inode is a directory, or if our
3839 	 * inode is not a directory, logging its parent unnecessarily.
3840 	 */
3841 	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3842 
3843 	/*
3844 	 * Same logic as for last_unlink_trans. We don't persist the generation
3845 	 * of the last transaction where this inode was used for a reflink
3846 	 * operation, so after eviction and reloading the inode we must be
3847 	 * pessimistic and assume the last transaction that modified the inode.
3848 	 */
3849 	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;
3850 
3851 	path->slots[0]++;
3852 	if (inode->i_nlink != 1 ||
3853 	    path->slots[0] >= btrfs_header_nritems(leaf))
3854 		goto cache_acl;
3855 
3856 	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3857 	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
3858 		goto cache_acl;
3859 
3860 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3861 	if (location.type == BTRFS_INODE_REF_KEY) {
3862 		struct btrfs_inode_ref *ref;
3863 
3864 		ref = (struct btrfs_inode_ref *)ptr;
3865 		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3866 	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3867 		struct btrfs_inode_extref *extref;
3868 
3869 		extref = (struct btrfs_inode_extref *)ptr;
3870 		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3871 								     extref);
3872 	}
3873 cache_acl:
3874 	/*
3875 	 * try to precache a NULL acl entry for files that don't have
3876 	 * any xattrs or acls
3877 	 */
3878 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3879 			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
3880 	if (first_xattr_slot != -1) {
3881 		path->slots[0] = first_xattr_slot;
3882 		ret = btrfs_load_inode_props(inode, path);
3883 		if (ret)
3884 			btrfs_err(fs_info,
3885 				  "error loading props for ino %llu (root %llu): %d",
3886 				  btrfs_ino(BTRFS_I(inode)),
3887 				  root->root_key.objectid, ret);
3888 	}
3889 	if (path != in_path)
3890 		btrfs_free_path(path);
3891 
3892 	if (!maybe_acls)
3893 		cache_no_acl(inode);
3894 
3895 	switch (inode->i_mode & S_IFMT) {
3896 	case S_IFREG:
3897 		inode->i_mapping->a_ops = &btrfs_aops;
3898 		inode->i_fop = &btrfs_file_operations;
3899 		inode->i_op = &btrfs_file_inode_operations;
3900 		break;
3901 	case S_IFDIR:
3902 		inode->i_fop = &btrfs_dir_file_operations;
3903 		inode->i_op = &btrfs_dir_inode_operations;
3904 		break;
3905 	case S_IFLNK:
3906 		inode->i_op = &btrfs_symlink_inode_operations;
3907 		inode_nohighmem(inode);
3908 		inode->i_mapping->a_ops = &btrfs_aops;
3909 		break;
3910 	default:
3911 		inode->i_op = &btrfs_special_inode_operations;
3912 		init_special_inode(inode, inode->i_mode, rdev);
3913 		break;
3914 	}
3915 
3916 	btrfs_sync_inode_flags_to_i_flags(inode);
3917 	return 0;
3918 }
3919 
3920 /*
3921  * given a leaf and an inode, copy the inode fields into the leaf
3922  */
3923 static void fill_inode_item(struct btrfs_trans_handle *trans,
3924 			    struct extent_buffer *leaf,
3925 			    struct btrfs_inode_item *item,
3926 			    struct inode *inode)
3927 {
3928 	struct btrfs_map_token token;
3929 	u64 flags;
3930 
3931 	btrfs_init_map_token(&token, leaf);
3932 
3933 	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
3934 	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
3935 	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
3936 	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
3937 	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
3938 
3939 	btrfs_set_token_timespec_sec(&token, &item->atime,
3940 				     inode_get_atime_sec(inode));
3941 	btrfs_set_token_timespec_nsec(&token, &item->atime,
3942 				      inode_get_atime_nsec(inode));
3943 
3944 	btrfs_set_token_timespec_sec(&token, &item->mtime,
3945 				     inode_get_mtime_sec(inode));
3946 	btrfs_set_token_timespec_nsec(&token, &item->mtime,
3947 				      inode_get_mtime_nsec(inode));
3948 
3949 	btrfs_set_token_timespec_sec(&token, &item->ctime,
3950 				     inode_get_ctime_sec(inode));
3951 	btrfs_set_token_timespec_nsec(&token, &item->ctime,
3952 				      inode_get_ctime_nsec(inode));
3953 
3954 	btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
3955 	btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
3956 
3957 	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
3958 	btrfs_set_token_inode_generation(&token, item,
3959 					 BTRFS_I(inode)->generation);
3960 	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
3961 	btrfs_set_token_inode_transid(&token, item, trans->transid);
3962 	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
3963 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
3964 					  BTRFS_I(inode)->ro_flags);
3965 	btrfs_set_token_inode_flags(&token, item, flags);
3966 	btrfs_set_token_inode_block_group(&token, item, 0);
3967 }
3968 
3969 /*
3970  * copy everything in the in-memory inode into the btree.
3971  */
3972 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3973 					    struct btrfs_inode *inode)
3974 {
3975 	struct btrfs_inode_item *inode_item;
3976 	struct btrfs_path *path;
3977 	struct extent_buffer *leaf;
3978 	int ret;
3979 
3980 	path = btrfs_alloc_path();
3981 	if (!path)
3982 		return -ENOMEM;
3983 
3984 	ret = btrfs_lookup_inode(trans, inode->root, path, &inode->location, 1);
3985 	if (ret) {
3986 		if (ret > 0)
3987 			ret = -ENOENT;
3988 		goto failed;
3989 	}
3990 
3991 	leaf = path->nodes[0];
3992 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
3993 				    struct btrfs_inode_item);
3994 
3995 	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
3996 	btrfs_mark_buffer_dirty(trans, leaf);
3997 	btrfs_set_inode_last_trans(trans, inode);
3998 	ret = 0;
3999 failed:
4000 	btrfs_free_path(path);
4001 	return ret;
4002 }
4003 
4004 /*
4005  * copy everything in the in-memory inode into the btree.
4006  */
4007 int btrfs_update_inode(struct btrfs_trans_handle *trans,
4008 		       struct btrfs_inode *inode)
4009 {
4010 	struct btrfs_root *root = inode->root;
4011 	struct btrfs_fs_info *fs_info = root->fs_info;
4012 	int ret;
4013 
4014 	/*
4015 	 * If the inode is a free space inode, we can deadlock during commit
4016 	 * if we put it into the delayed code.
4017 	 *
4018 	 * The data relocation inode should also be directly updated
4019 	 * without delay
4020 	 */
4021 	if (!btrfs_is_free_space_inode(inode)
4022 	    && !btrfs_is_data_reloc_root(root)
4023 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4024 		btrfs_update_root_times(trans, root);
4025 
4026 		ret = btrfs_delayed_update_inode(trans, inode);
4027 		if (!ret)
4028 			btrfs_set_inode_last_trans(trans, inode);
4029 		return ret;
4030 	}
4031 
4032 	return btrfs_update_inode_item(trans, inode);
4033 }
4034 
4035 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4036 				struct btrfs_inode *inode)
4037 {
4038 	int ret;
4039 
4040 	ret = btrfs_update_inode(trans, inode);
4041 	if (ret == -ENOSPC)
4042 		return btrfs_update_inode_item(trans, inode);
4043 	return ret;
4044 }
4045 
4046 /*
4047  * unlink helper that gets used here in inode.c and in the tree logging
4048  * recovery code.  It remove a link in a directory with a given name, and
4049  * also drops the back refs in the inode to the directory
4050  */
4051 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4052 				struct btrfs_inode *dir,
4053 				struct btrfs_inode *inode,
4054 				const struct fscrypt_str *name,
4055 				struct btrfs_rename_ctx *rename_ctx)
4056 {
4057 	struct btrfs_root *root = dir->root;
4058 	struct btrfs_fs_info *fs_info = root->fs_info;
4059 	struct btrfs_path *path;
4060 	int ret = 0;
4061 	struct btrfs_dir_item *di;
4062 	u64 index;
4063 	u64 ino = btrfs_ino(inode);
4064 	u64 dir_ino = btrfs_ino(dir);
4065 
4066 	path = btrfs_alloc_path();
4067 	if (!path) {
4068 		ret = -ENOMEM;
4069 		goto out;
4070 	}
4071 
4072 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
4073 	if (IS_ERR_OR_NULL(di)) {
4074 		ret = di ? PTR_ERR(di) : -ENOENT;
4075 		goto err;
4076 	}
4077 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4078 	if (ret)
4079 		goto err;
4080 	btrfs_release_path(path);
4081 
4082 	/*
4083 	 * If we don't have dir index, we have to get it by looking up
4084 	 * the inode ref, since we get the inode ref, remove it directly,
4085 	 * it is unnecessary to do delayed deletion.
4086 	 *
4087 	 * But if we have dir index, needn't search inode ref to get it.
4088 	 * Since the inode ref is close to the inode item, it is better
4089 	 * that we delay to delete it, and just do this deletion when
4090 	 * we update the inode item.
4091 	 */
4092 	if (inode->dir_index) {
4093 		ret = btrfs_delayed_delete_inode_ref(inode);
4094 		if (!ret) {
4095 			index = inode->dir_index;
4096 			goto skip_backref;
4097 		}
4098 	}
4099 
4100 	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
4101 	if (ret) {
4102 		btrfs_info(fs_info,
4103 			"failed to delete reference to %.*s, inode %llu parent %llu",
4104 			name->len, name->name, ino, dir_ino);
4105 		btrfs_abort_transaction(trans, ret);
4106 		goto err;
4107 	}
4108 skip_backref:
4109 	if (rename_ctx)
4110 		rename_ctx->index = index;
4111 
4112 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4113 	if (ret) {
4114 		btrfs_abort_transaction(trans, ret);
4115 		goto err;
4116 	}
4117 
4118 	/*
4119 	 * If we are in a rename context, we don't need to update anything in the
4120 	 * log. That will be done later during the rename by btrfs_log_new_name().
4121 	 * Besides that, doing it here would only cause extra unnecessary btree
4122 	 * operations on the log tree, increasing latency for applications.
4123 	 */
4124 	if (!rename_ctx) {
4125 		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
4126 		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
4127 	}
4128 
4129 	/*
4130 	 * If we have a pending delayed iput we could end up with the final iput
4131 	 * being run in btrfs-cleaner context.  If we have enough of these built
4132 	 * up we can end up burning a lot of time in btrfs-cleaner without any
4133 	 * way to throttle the unlinks.  Since we're currently holding a ref on
4134 	 * the inode we can run the delayed iput here without any issues as the
4135 	 * final iput won't be done until after we drop the ref we're currently
4136 	 * holding.
4137 	 */
4138 	btrfs_run_delayed_iput(fs_info, inode);
4139 err:
4140 	btrfs_free_path(path);
4141 	if (ret)
4142 		goto out;
4143 
4144 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
4145 	inode_inc_iversion(&inode->vfs_inode);
4146 	inode_inc_iversion(&dir->vfs_inode);
4147  	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4148 	ret = btrfs_update_inode(trans, dir);
4149 out:
4150 	return ret;
4151 }
4152 
4153 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4154 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
4155 		       const struct fscrypt_str *name)
4156 {
4157 	int ret;
4158 
4159 	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
4160 	if (!ret) {
4161 		drop_nlink(&inode->vfs_inode);
4162 		ret = btrfs_update_inode(trans, inode);
4163 	}
4164 	return ret;
4165 }
4166 
4167 /*
4168  * helper to start transaction for unlink and rmdir.
4169  *
4170  * unlink and rmdir are special in btrfs, they do not always free space, so
4171  * if we cannot make our reservations the normal way try and see if there is
4172  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4173  * allow the unlink to occur.
4174  */
4175 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
4176 {
4177 	struct btrfs_root *root = dir->root;
4178 
4179 	return btrfs_start_transaction_fallback_global_rsv(root,
4180 						   BTRFS_UNLINK_METADATA_UNITS);
4181 }
4182 
4183 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4184 {
4185 	struct btrfs_trans_handle *trans;
4186 	struct inode *inode = d_inode(dentry);
4187 	int ret;
4188 	struct fscrypt_name fname;
4189 
4190 	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4191 	if (ret)
4192 		return ret;
4193 
4194 	/* This needs to handle no-key deletions later on */
4195 
4196 	trans = __unlink_start_trans(BTRFS_I(dir));
4197 	if (IS_ERR(trans)) {
4198 		ret = PTR_ERR(trans);
4199 		goto fscrypt_free;
4200 	}
4201 
4202 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4203 				false);
4204 
4205 	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4206 				 &fname.disk_name);
4207 	if (ret)
4208 		goto end_trans;
4209 
4210 	if (inode->i_nlink == 0) {
4211 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4212 		if (ret)
4213 			goto end_trans;
4214 	}
4215 
4216 end_trans:
4217 	btrfs_end_transaction(trans);
4218 	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4219 fscrypt_free:
4220 	fscrypt_free_filename(&fname);
4221 	return ret;
4222 }
4223 
4224 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4225 			       struct btrfs_inode *dir, struct dentry *dentry)
4226 {
4227 	struct btrfs_root *root = dir->root;
4228 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4229 	struct btrfs_path *path;
4230 	struct extent_buffer *leaf;
4231 	struct btrfs_dir_item *di;
4232 	struct btrfs_key key;
4233 	u64 index;
4234 	int ret;
4235 	u64 objectid;
4236 	u64 dir_ino = btrfs_ino(dir);
4237 	struct fscrypt_name fname;
4238 
4239 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
4240 	if (ret)
4241 		return ret;
4242 
4243 	/* This needs to handle no-key deletions later on */
4244 
4245 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4246 		objectid = inode->root->root_key.objectid;
4247 	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4248 		objectid = inode->location.objectid;
4249 	} else {
4250 		WARN_ON(1);
4251 		fscrypt_free_filename(&fname);
4252 		return -EINVAL;
4253 	}
4254 
4255 	path = btrfs_alloc_path();
4256 	if (!path) {
4257 		ret = -ENOMEM;
4258 		goto out;
4259 	}
4260 
4261 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4262 				   &fname.disk_name, -1);
4263 	if (IS_ERR_OR_NULL(di)) {
4264 		ret = di ? PTR_ERR(di) : -ENOENT;
4265 		goto out;
4266 	}
4267 
4268 	leaf = path->nodes[0];
4269 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4270 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4271 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4272 	if (ret) {
4273 		btrfs_abort_transaction(trans, ret);
4274 		goto out;
4275 	}
4276 	btrfs_release_path(path);
4277 
4278 	/*
4279 	 * This is a placeholder inode for a subvolume we didn't have a
4280 	 * reference to at the time of the snapshot creation.  In the meantime
4281 	 * we could have renamed the real subvol link into our snapshot, so
4282 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4283 	 * Instead simply lookup the dir_index_item for this entry so we can
4284 	 * remove it.  Otherwise we know we have a ref to the root and we can
4285 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4286 	 */
4287 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4288 		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
4289 		if (IS_ERR_OR_NULL(di)) {
4290 			if (!di)
4291 				ret = -ENOENT;
4292 			else
4293 				ret = PTR_ERR(di);
4294 			btrfs_abort_transaction(trans, ret);
4295 			goto out;
4296 		}
4297 
4298 		leaf = path->nodes[0];
4299 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4300 		index = key.offset;
4301 		btrfs_release_path(path);
4302 	} else {
4303 		ret = btrfs_del_root_ref(trans, objectid,
4304 					 root->root_key.objectid, dir_ino,
4305 					 &index, &fname.disk_name);
4306 		if (ret) {
4307 			btrfs_abort_transaction(trans, ret);
4308 			goto out;
4309 		}
4310 	}
4311 
4312 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4313 	if (ret) {
4314 		btrfs_abort_transaction(trans, ret);
4315 		goto out;
4316 	}
4317 
4318 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
4319 	inode_inc_iversion(&dir->vfs_inode);
4320 	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4321 	ret = btrfs_update_inode_fallback(trans, dir);
4322 	if (ret)
4323 		btrfs_abort_transaction(trans, ret);
4324 out:
4325 	btrfs_free_path(path);
4326 	fscrypt_free_filename(&fname);
4327 	return ret;
4328 }
4329 
4330 /*
4331  * Helper to check if the subvolume references other subvolumes or if it's
4332  * default.
4333  */
4334 static noinline int may_destroy_subvol(struct btrfs_root *root)
4335 {
4336 	struct btrfs_fs_info *fs_info = root->fs_info;
4337 	struct btrfs_path *path;
4338 	struct btrfs_dir_item *di;
4339 	struct btrfs_key key;
4340 	struct fscrypt_str name = FSTR_INIT("default", 7);
4341 	u64 dir_id;
4342 	int ret;
4343 
4344 	path = btrfs_alloc_path();
4345 	if (!path)
4346 		return -ENOMEM;
4347 
4348 	/* Make sure this root isn't set as the default subvol */
4349 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4350 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4351 				   dir_id, &name, 0);
4352 	if (di && !IS_ERR(di)) {
4353 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4354 		if (key.objectid == root->root_key.objectid) {
4355 			ret = -EPERM;
4356 			btrfs_err(fs_info,
4357 				  "deleting default subvolume %llu is not allowed",
4358 				  key.objectid);
4359 			goto out;
4360 		}
4361 		btrfs_release_path(path);
4362 	}
4363 
4364 	key.objectid = root->root_key.objectid;
4365 	key.type = BTRFS_ROOT_REF_KEY;
4366 	key.offset = (u64)-1;
4367 
4368 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4369 	if (ret < 0)
4370 		goto out;
4371 	BUG_ON(ret == 0);
4372 
4373 	ret = 0;
4374 	if (path->slots[0] > 0) {
4375 		path->slots[0]--;
4376 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4377 		if (key.objectid == root->root_key.objectid &&
4378 		    key.type == BTRFS_ROOT_REF_KEY)
4379 			ret = -ENOTEMPTY;
4380 	}
4381 out:
4382 	btrfs_free_path(path);
4383 	return ret;
4384 }
4385 
4386 /* Delete all dentries for inodes belonging to the root */
4387 static void btrfs_prune_dentries(struct btrfs_root *root)
4388 {
4389 	struct btrfs_fs_info *fs_info = root->fs_info;
4390 	struct rb_node *node;
4391 	struct rb_node *prev;
4392 	struct btrfs_inode *entry;
4393 	struct inode *inode;
4394 	u64 objectid = 0;
4395 
4396 	if (!BTRFS_FS_ERROR(fs_info))
4397 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4398 
4399 	spin_lock(&root->inode_lock);
4400 again:
4401 	node = root->inode_tree.rb_node;
4402 	prev = NULL;
4403 	while (node) {
4404 		prev = node;
4405 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4406 
4407 		if (objectid < btrfs_ino(entry))
4408 			node = node->rb_left;
4409 		else if (objectid > btrfs_ino(entry))
4410 			node = node->rb_right;
4411 		else
4412 			break;
4413 	}
4414 	if (!node) {
4415 		while (prev) {
4416 			entry = rb_entry(prev, struct btrfs_inode, rb_node);
4417 			if (objectid <= btrfs_ino(entry)) {
4418 				node = prev;
4419 				break;
4420 			}
4421 			prev = rb_next(prev);
4422 		}
4423 	}
4424 	while (node) {
4425 		entry = rb_entry(node, struct btrfs_inode, rb_node);
4426 		objectid = btrfs_ino(entry) + 1;
4427 		inode = igrab(&entry->vfs_inode);
4428 		if (inode) {
4429 			spin_unlock(&root->inode_lock);
4430 			if (atomic_read(&inode->i_count) > 1)
4431 				d_prune_aliases(inode);
4432 			/*
4433 			 * btrfs_drop_inode will have it removed from the inode
4434 			 * cache when its usage count hits zero.
4435 			 */
4436 			iput(inode);
4437 			cond_resched();
4438 			spin_lock(&root->inode_lock);
4439 			goto again;
4440 		}
4441 
4442 		if (cond_resched_lock(&root->inode_lock))
4443 			goto again;
4444 
4445 		node = rb_next(node);
4446 	}
4447 	spin_unlock(&root->inode_lock);
4448 }
4449 
4450 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
4451 {
4452 	struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
4453 	struct btrfs_root *root = dir->root;
4454 	struct inode *inode = d_inode(dentry);
4455 	struct btrfs_root *dest = BTRFS_I(inode)->root;
4456 	struct btrfs_trans_handle *trans;
4457 	struct btrfs_block_rsv block_rsv;
4458 	u64 root_flags;
4459 	int ret;
4460 
4461 	/*
4462 	 * Don't allow to delete a subvolume with send in progress. This is
4463 	 * inside the inode lock so the error handling that has to drop the bit
4464 	 * again is not run concurrently.
4465 	 */
4466 	spin_lock(&dest->root_item_lock);
4467 	if (dest->send_in_progress) {
4468 		spin_unlock(&dest->root_item_lock);
4469 		btrfs_warn(fs_info,
4470 			   "attempt to delete subvolume %llu during send",
4471 			   dest->root_key.objectid);
4472 		return -EPERM;
4473 	}
4474 	if (atomic_read(&dest->nr_swapfiles)) {
4475 		spin_unlock(&dest->root_item_lock);
4476 		btrfs_warn(fs_info,
4477 			   "attempt to delete subvolume %llu with active swapfile",
4478 			   root->root_key.objectid);
4479 		return -EPERM;
4480 	}
4481 	root_flags = btrfs_root_flags(&dest->root_item);
4482 	btrfs_set_root_flags(&dest->root_item,
4483 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4484 	spin_unlock(&dest->root_item_lock);
4485 
4486 	down_write(&fs_info->subvol_sem);
4487 
4488 	ret = may_destroy_subvol(dest);
4489 	if (ret)
4490 		goto out_up_write;
4491 
4492 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4493 	/*
4494 	 * One for dir inode,
4495 	 * two for dir entries,
4496 	 * two for root ref/backref.
4497 	 */
4498 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4499 	if (ret)
4500 		goto out_up_write;
4501 
4502 	trans = btrfs_start_transaction(root, 0);
4503 	if (IS_ERR(trans)) {
4504 		ret = PTR_ERR(trans);
4505 		goto out_release;
4506 	}
4507 	trans->block_rsv = &block_rsv;
4508 	trans->bytes_reserved = block_rsv.size;
4509 
4510 	btrfs_record_snapshot_destroy(trans, dir);
4511 
4512 	ret = btrfs_unlink_subvol(trans, dir, dentry);
4513 	if (ret) {
4514 		btrfs_abort_transaction(trans, ret);
4515 		goto out_end_trans;
4516 	}
4517 
4518 	ret = btrfs_record_root_in_trans(trans, dest);
4519 	if (ret) {
4520 		btrfs_abort_transaction(trans, ret);
4521 		goto out_end_trans;
4522 	}
4523 
4524 	memset(&dest->root_item.drop_progress, 0,
4525 		sizeof(dest->root_item.drop_progress));
4526 	btrfs_set_root_drop_level(&dest->root_item, 0);
4527 	btrfs_set_root_refs(&dest->root_item, 0);
4528 
4529 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4530 		ret = btrfs_insert_orphan_item(trans,
4531 					fs_info->tree_root,
4532 					dest->root_key.objectid);
4533 		if (ret) {
4534 			btrfs_abort_transaction(trans, ret);
4535 			goto out_end_trans;
4536 		}
4537 	}
4538 
4539 	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4540 				  BTRFS_UUID_KEY_SUBVOL,
4541 				  dest->root_key.objectid);
4542 	if (ret && ret != -ENOENT) {
4543 		btrfs_abort_transaction(trans, ret);
4544 		goto out_end_trans;
4545 	}
4546 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4547 		ret = btrfs_uuid_tree_remove(trans,
4548 					  dest->root_item.received_uuid,
4549 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4550 					  dest->root_key.objectid);
4551 		if (ret && ret != -ENOENT) {
4552 			btrfs_abort_transaction(trans, ret);
4553 			goto out_end_trans;
4554 		}
4555 	}
4556 
4557 	free_anon_bdev(dest->anon_dev);
4558 	dest->anon_dev = 0;
4559 out_end_trans:
4560 	trans->block_rsv = NULL;
4561 	trans->bytes_reserved = 0;
4562 	ret = btrfs_end_transaction(trans);
4563 	inode->i_flags |= S_DEAD;
4564 out_release:
4565 	btrfs_subvolume_release_metadata(root, &block_rsv);
4566 out_up_write:
4567 	up_write(&fs_info->subvol_sem);
4568 	if (ret) {
4569 		spin_lock(&dest->root_item_lock);
4570 		root_flags = btrfs_root_flags(&dest->root_item);
4571 		btrfs_set_root_flags(&dest->root_item,
4572 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4573 		spin_unlock(&dest->root_item_lock);
4574 	} else {
4575 		d_invalidate(dentry);
4576 		btrfs_prune_dentries(dest);
4577 		ASSERT(dest->send_in_progress == 0);
4578 	}
4579 
4580 	return ret;
4581 }
4582 
4583 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4584 {
4585 	struct inode *inode = d_inode(dentry);
4586 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
4587 	int err = 0;
4588 	struct btrfs_trans_handle *trans;
4589 	u64 last_unlink_trans;
4590 	struct fscrypt_name fname;
4591 
4592 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4593 		return -ENOTEMPTY;
4594 	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
4595 		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4596 			btrfs_err(fs_info,
4597 			"extent tree v2 doesn't support snapshot deletion yet");
4598 			return -EOPNOTSUPP;
4599 		}
4600 		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
4601 	}
4602 
4603 	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4604 	if (err)
4605 		return err;
4606 
4607 	/* This needs to handle no-key deletions later on */
4608 
4609 	trans = __unlink_start_trans(BTRFS_I(dir));
4610 	if (IS_ERR(trans)) {
4611 		err = PTR_ERR(trans);
4612 		goto out_notrans;
4613 	}
4614 
4615 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4616 		err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
4617 		goto out;
4618 	}
4619 
4620 	err = btrfs_orphan_add(trans, BTRFS_I(inode));
4621 	if (err)
4622 		goto out;
4623 
4624 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
4625 
4626 	/* now the directory is empty */
4627 	err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4628 				 &fname.disk_name);
4629 	if (!err) {
4630 		btrfs_i_size_write(BTRFS_I(inode), 0);
4631 		/*
4632 		 * Propagate the last_unlink_trans value of the deleted dir to
4633 		 * its parent directory. This is to prevent an unrecoverable
4634 		 * log tree in the case we do something like this:
4635 		 * 1) create dir foo
4636 		 * 2) create snapshot under dir foo
4637 		 * 3) delete the snapshot
4638 		 * 4) rmdir foo
4639 		 * 5) mkdir foo
4640 		 * 6) fsync foo or some file inside foo
4641 		 */
4642 		if (last_unlink_trans >= trans->transid)
4643 			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
4644 	}
4645 out:
4646 	btrfs_end_transaction(trans);
4647 out_notrans:
4648 	btrfs_btree_balance_dirty(fs_info);
4649 	fscrypt_free_filename(&fname);
4650 
4651 	return err;
4652 }
4653 
4654 /*
4655  * Read, zero a chunk and write a block.
4656  *
4657  * @inode - inode that we're zeroing
4658  * @from - the offset to start zeroing
4659  * @len - the length to zero, 0 to zero the entire range respective to the
4660  *	offset
4661  * @front - zero up to the offset instead of from the offset on
4662  *
4663  * This will find the block for the "from" offset and cow the block and zero the
4664  * part we want to zero.  This is used with truncate and hole punching.
4665  */
4666 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
4667 			 int front)
4668 {
4669 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4670 	struct address_space *mapping = inode->vfs_inode.i_mapping;
4671 	struct extent_io_tree *io_tree = &inode->io_tree;
4672 	struct btrfs_ordered_extent *ordered;
4673 	struct extent_state *cached_state = NULL;
4674 	struct extent_changeset *data_reserved = NULL;
4675 	bool only_release_metadata = false;
4676 	u32 blocksize = fs_info->sectorsize;
4677 	pgoff_t index = from >> PAGE_SHIFT;
4678 	unsigned offset = from & (blocksize - 1);
4679 	struct page *page;
4680 	gfp_t mask = btrfs_alloc_write_mask(mapping);
4681 	size_t write_bytes = blocksize;
4682 	int ret = 0;
4683 	u64 block_start;
4684 	u64 block_end;
4685 
4686 	if (IS_ALIGNED(offset, blocksize) &&
4687 	    (!len || IS_ALIGNED(len, blocksize)))
4688 		goto out;
4689 
4690 	block_start = round_down(from, blocksize);
4691 	block_end = block_start + blocksize - 1;
4692 
4693 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
4694 					  blocksize, false);
4695 	if (ret < 0) {
4696 		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
4697 			/* For nocow case, no need to reserve data space */
4698 			only_release_metadata = true;
4699 		} else {
4700 			goto out;
4701 		}
4702 	}
4703 	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
4704 	if (ret < 0) {
4705 		if (!only_release_metadata)
4706 			btrfs_free_reserved_data_space(inode, data_reserved,
4707 						       block_start, blocksize);
4708 		goto out;
4709 	}
4710 again:
4711 	page = find_or_create_page(mapping, index, mask);
4712 	if (!page) {
4713 		btrfs_delalloc_release_space(inode, data_reserved, block_start,
4714 					     blocksize, true);
4715 		btrfs_delalloc_release_extents(inode, blocksize);
4716 		ret = -ENOMEM;
4717 		goto out;
4718 	}
4719 
4720 	if (!PageUptodate(page)) {
4721 		ret = btrfs_read_folio(NULL, page_folio(page));
4722 		lock_page(page);
4723 		if (page->mapping != mapping) {
4724 			unlock_page(page);
4725 			put_page(page);
4726 			goto again;
4727 		}
4728 		if (!PageUptodate(page)) {
4729 			ret = -EIO;
4730 			goto out_unlock;
4731 		}
4732 	}
4733 
4734 	/*
4735 	 * We unlock the page after the io is completed and then re-lock it
4736 	 * above.  release_folio() could have come in between that and cleared
4737 	 * folio private, but left the page in the mapping.  Set the page mapped
4738 	 * here to make sure it's properly set for the subpage stuff.
4739 	 */
4740 	ret = set_page_extent_mapped(page);
4741 	if (ret < 0)
4742 		goto out_unlock;
4743 
4744 	wait_on_page_writeback(page);
4745 
4746 	lock_extent(io_tree, block_start, block_end, &cached_state);
4747 
4748 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
4749 	if (ordered) {
4750 		unlock_extent(io_tree, block_start, block_end, &cached_state);
4751 		unlock_page(page);
4752 		put_page(page);
4753 		btrfs_start_ordered_extent(ordered);
4754 		btrfs_put_ordered_extent(ordered);
4755 		goto again;
4756 	}
4757 
4758 	clear_extent_bit(&inode->io_tree, block_start, block_end,
4759 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4760 			 &cached_state);
4761 
4762 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
4763 					&cached_state);
4764 	if (ret) {
4765 		unlock_extent(io_tree, block_start, block_end, &cached_state);
4766 		goto out_unlock;
4767 	}
4768 
4769 	if (offset != blocksize) {
4770 		if (!len)
4771 			len = blocksize - offset;
4772 		if (front)
4773 			memzero_page(page, (block_start - page_offset(page)),
4774 				     offset);
4775 		else
4776 			memzero_page(page, (block_start - page_offset(page)) + offset,
4777 				     len);
4778 	}
4779 	btrfs_folio_clear_checked(fs_info, page_folio(page), block_start,
4780 				  block_end + 1 - block_start);
4781 	btrfs_folio_set_dirty(fs_info, page_folio(page), block_start,
4782 			      block_end + 1 - block_start);
4783 	unlock_extent(io_tree, block_start, block_end, &cached_state);
4784 
4785 	if (only_release_metadata)
4786 		set_extent_bit(&inode->io_tree, block_start, block_end,
4787 			       EXTENT_NORESERVE, NULL);
4788 
4789 out_unlock:
4790 	if (ret) {
4791 		if (only_release_metadata)
4792 			btrfs_delalloc_release_metadata(inode, blocksize, true);
4793 		else
4794 			btrfs_delalloc_release_space(inode, data_reserved,
4795 					block_start, blocksize, true);
4796 	}
4797 	btrfs_delalloc_release_extents(inode, blocksize);
4798 	unlock_page(page);
4799 	put_page(page);
4800 out:
4801 	if (only_release_metadata)
4802 		btrfs_check_nocow_unlock(inode);
4803 	extent_changeset_free(data_reserved);
4804 	return ret;
4805 }
4806 
4807 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
4808 {
4809 	struct btrfs_root *root = inode->root;
4810 	struct btrfs_fs_info *fs_info = root->fs_info;
4811 	struct btrfs_trans_handle *trans;
4812 	struct btrfs_drop_extents_args drop_args = { 0 };
4813 	int ret;
4814 
4815 	/*
4816 	 * If NO_HOLES is enabled, we don't need to do anything.
4817 	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
4818 	 * or btrfs_update_inode() will be called, which guarantee that the next
4819 	 * fsync will know this inode was changed and needs to be logged.
4820 	 */
4821 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
4822 		return 0;
4823 
4824 	/*
4825 	 * 1 - for the one we're dropping
4826 	 * 1 - for the one we're adding
4827 	 * 1 - for updating the inode.
4828 	 */
4829 	trans = btrfs_start_transaction(root, 3);
4830 	if (IS_ERR(trans))
4831 		return PTR_ERR(trans);
4832 
4833 	drop_args.start = offset;
4834 	drop_args.end = offset + len;
4835 	drop_args.drop_cache = true;
4836 
4837 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
4838 	if (ret) {
4839 		btrfs_abort_transaction(trans, ret);
4840 		btrfs_end_transaction(trans);
4841 		return ret;
4842 	}
4843 
4844 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
4845 	if (ret) {
4846 		btrfs_abort_transaction(trans, ret);
4847 	} else {
4848 		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
4849 		btrfs_update_inode(trans, inode);
4850 	}
4851 	btrfs_end_transaction(trans);
4852 	return ret;
4853 }
4854 
4855 /*
4856  * This function puts in dummy file extents for the area we're creating a hole
4857  * for.  So if we are truncating this file to a larger size we need to insert
4858  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
4859  * the range between oldsize and size
4860  */
4861 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
4862 {
4863 	struct btrfs_root *root = inode->root;
4864 	struct btrfs_fs_info *fs_info = root->fs_info;
4865 	struct extent_io_tree *io_tree = &inode->io_tree;
4866 	struct extent_map *em = NULL;
4867 	struct extent_state *cached_state = NULL;
4868 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
4869 	u64 block_end = ALIGN(size, fs_info->sectorsize);
4870 	u64 last_byte;
4871 	u64 cur_offset;
4872 	u64 hole_size;
4873 	int err = 0;
4874 
4875 	/*
4876 	 * If our size started in the middle of a block we need to zero out the
4877 	 * rest of the block before we expand the i_size, otherwise we could
4878 	 * expose stale data.
4879 	 */
4880 	err = btrfs_truncate_block(inode, oldsize, 0, 0);
4881 	if (err)
4882 		return err;
4883 
4884 	if (size <= hole_start)
4885 		return 0;
4886 
4887 	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
4888 					   &cached_state);
4889 	cur_offset = hole_start;
4890 	while (1) {
4891 		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4892 				      block_end - cur_offset);
4893 		if (IS_ERR(em)) {
4894 			err = PTR_ERR(em);
4895 			em = NULL;
4896 			break;
4897 		}
4898 		last_byte = min(extent_map_end(em), block_end);
4899 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
4900 		hole_size = last_byte - cur_offset;
4901 
4902 		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
4903 			struct extent_map *hole_em;
4904 
4905 			err = maybe_insert_hole(inode, cur_offset, hole_size);
4906 			if (err)
4907 				break;
4908 
4909 			err = btrfs_inode_set_file_extent_range(inode,
4910 							cur_offset, hole_size);
4911 			if (err)
4912 				break;
4913 
4914 			hole_em = alloc_extent_map();
4915 			if (!hole_em) {
4916 				btrfs_drop_extent_map_range(inode, cur_offset,
4917 						    cur_offset + hole_size - 1,
4918 						    false);
4919 				btrfs_set_inode_full_sync(inode);
4920 				goto next;
4921 			}
4922 			hole_em->start = cur_offset;
4923 			hole_em->len = hole_size;
4924 			hole_em->orig_start = cur_offset;
4925 
4926 			hole_em->block_start = EXTENT_MAP_HOLE;
4927 			hole_em->block_len = 0;
4928 			hole_em->orig_block_len = 0;
4929 			hole_em->ram_bytes = hole_size;
4930 			hole_em->generation = btrfs_get_fs_generation(fs_info);
4931 
4932 			err = btrfs_replace_extent_map_range(inode, hole_em, true);
4933 			free_extent_map(hole_em);
4934 		} else {
4935 			err = btrfs_inode_set_file_extent_range(inode,
4936 							cur_offset, hole_size);
4937 			if (err)
4938 				break;
4939 		}
4940 next:
4941 		free_extent_map(em);
4942 		em = NULL;
4943 		cur_offset = last_byte;
4944 		if (cur_offset >= block_end)
4945 			break;
4946 	}
4947 	free_extent_map(em);
4948 	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
4949 	return err;
4950 }
4951 
4952 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4953 {
4954 	struct btrfs_root *root = BTRFS_I(inode)->root;
4955 	struct btrfs_trans_handle *trans;
4956 	loff_t oldsize = i_size_read(inode);
4957 	loff_t newsize = attr->ia_size;
4958 	int mask = attr->ia_valid;
4959 	int ret;
4960 
4961 	/*
4962 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4963 	 * special case where we need to update the times despite not having
4964 	 * these flags set.  For all other operations the VFS set these flags
4965 	 * explicitly if it wants a timestamp update.
4966 	 */
4967 	if (newsize != oldsize) {
4968 		inode_inc_iversion(inode);
4969 		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
4970 			inode_set_mtime_to_ts(inode,
4971 					      inode_set_ctime_current(inode));
4972 		}
4973 	}
4974 
4975 	if (newsize > oldsize) {
4976 		/*
4977 		 * Don't do an expanding truncate while snapshotting is ongoing.
4978 		 * This is to ensure the snapshot captures a fully consistent
4979 		 * state of this file - if the snapshot captures this expanding
4980 		 * truncation, it must capture all writes that happened before
4981 		 * this truncation.
4982 		 */
4983 		btrfs_drew_write_lock(&root->snapshot_lock);
4984 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
4985 		if (ret) {
4986 			btrfs_drew_write_unlock(&root->snapshot_lock);
4987 			return ret;
4988 		}
4989 
4990 		trans = btrfs_start_transaction(root, 1);
4991 		if (IS_ERR(trans)) {
4992 			btrfs_drew_write_unlock(&root->snapshot_lock);
4993 			return PTR_ERR(trans);
4994 		}
4995 
4996 		i_size_write(inode, newsize);
4997 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
4998 		pagecache_isize_extended(inode, oldsize, newsize);
4999 		ret = btrfs_update_inode(trans, BTRFS_I(inode));
5000 		btrfs_drew_write_unlock(&root->snapshot_lock);
5001 		btrfs_end_transaction(trans);
5002 	} else {
5003 		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5004 
5005 		if (btrfs_is_zoned(fs_info)) {
5006 			ret = btrfs_wait_ordered_range(inode,
5007 					ALIGN(newsize, fs_info->sectorsize),
5008 					(u64)-1);
5009 			if (ret)
5010 				return ret;
5011 		}
5012 
5013 		/*
5014 		 * We're truncating a file that used to have good data down to
5015 		 * zero. Make sure any new writes to the file get on disk
5016 		 * on close.
5017 		 */
5018 		if (newsize == 0)
5019 			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5020 				&BTRFS_I(inode)->runtime_flags);
5021 
5022 		truncate_setsize(inode, newsize);
5023 
5024 		inode_dio_wait(inode);
5025 
5026 		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
5027 		if (ret && inode->i_nlink) {
5028 			int err;
5029 
5030 			/*
5031 			 * Truncate failed, so fix up the in-memory size. We
5032 			 * adjusted disk_i_size down as we removed extents, so
5033 			 * wait for disk_i_size to be stable and then update the
5034 			 * in-memory size to match.
5035 			 */
5036 			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
5037 			if (err)
5038 				return err;
5039 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5040 		}
5041 	}
5042 
5043 	return ret;
5044 }
5045 
5046 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5047 			 struct iattr *attr)
5048 {
5049 	struct inode *inode = d_inode(dentry);
5050 	struct btrfs_root *root = BTRFS_I(inode)->root;
5051 	int err;
5052 
5053 	if (btrfs_root_readonly(root))
5054 		return -EROFS;
5055 
5056 	err = setattr_prepare(idmap, dentry, attr);
5057 	if (err)
5058 		return err;
5059 
5060 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5061 		err = btrfs_setsize(inode, attr);
5062 		if (err)
5063 			return err;
5064 	}
5065 
5066 	if (attr->ia_valid) {
5067 		setattr_copy(idmap, inode, attr);
5068 		inode_inc_iversion(inode);
5069 		err = btrfs_dirty_inode(BTRFS_I(inode));
5070 
5071 		if (!err && attr->ia_valid & ATTR_MODE)
5072 			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
5073 	}
5074 
5075 	return err;
5076 }
5077 
5078 /*
5079  * While truncating the inode pages during eviction, we get the VFS
5080  * calling btrfs_invalidate_folio() against each folio of the inode. This
5081  * is slow because the calls to btrfs_invalidate_folio() result in a
5082  * huge amount of calls to lock_extent() and clear_extent_bit(),
5083  * which keep merging and splitting extent_state structures over and over,
5084  * wasting lots of time.
5085  *
5086  * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5087  * skip all those expensive operations on a per folio basis and do only
5088  * the ordered io finishing, while we release here the extent_map and
5089  * extent_state structures, without the excessive merging and splitting.
5090  */
5091 static void evict_inode_truncate_pages(struct inode *inode)
5092 {
5093 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5094 	struct rb_node *node;
5095 
5096 	ASSERT(inode->i_state & I_FREEING);
5097 	truncate_inode_pages_final(&inode->i_data);
5098 
5099 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5100 
5101 	/*
5102 	 * Keep looping until we have no more ranges in the io tree.
5103 	 * We can have ongoing bios started by readahead that have
5104 	 * their endio callback (extent_io.c:end_bio_extent_readpage)
5105 	 * still in progress (unlocked the pages in the bio but did not yet
5106 	 * unlocked the ranges in the io tree). Therefore this means some
5107 	 * ranges can still be locked and eviction started because before
5108 	 * submitting those bios, which are executed by a separate task (work
5109 	 * queue kthread), inode references (inode->i_count) were not taken
5110 	 * (which would be dropped in the end io callback of each bio).
5111 	 * Therefore here we effectively end up waiting for those bios and
5112 	 * anyone else holding locked ranges without having bumped the inode's
5113 	 * reference count - if we don't do it, when they access the inode's
5114 	 * io_tree to unlock a range it may be too late, leading to an
5115 	 * use-after-free issue.
5116 	 */
5117 	spin_lock(&io_tree->lock);
5118 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5119 		struct extent_state *state;
5120 		struct extent_state *cached_state = NULL;
5121 		u64 start;
5122 		u64 end;
5123 		unsigned state_flags;
5124 
5125 		node = rb_first(&io_tree->state);
5126 		state = rb_entry(node, struct extent_state, rb_node);
5127 		start = state->start;
5128 		end = state->end;
5129 		state_flags = state->state;
5130 		spin_unlock(&io_tree->lock);
5131 
5132 		lock_extent(io_tree, start, end, &cached_state);
5133 
5134 		/*
5135 		 * If still has DELALLOC flag, the extent didn't reach disk,
5136 		 * and its reserved space won't be freed by delayed_ref.
5137 		 * So we need to free its reserved space here.
5138 		 * (Refer to comment in btrfs_invalidate_folio, case 2)
5139 		 *
5140 		 * Note, end is the bytenr of last byte, so we need + 1 here.
5141 		 */
5142 		if (state_flags & EXTENT_DELALLOC)
5143 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5144 					       end - start + 1, NULL);
5145 
5146 		clear_extent_bit(io_tree, start, end,
5147 				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5148 				 &cached_state);
5149 
5150 		cond_resched();
5151 		spin_lock(&io_tree->lock);
5152 	}
5153 	spin_unlock(&io_tree->lock);
5154 }
5155 
5156 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5157 							struct btrfs_block_rsv *rsv)
5158 {
5159 	struct btrfs_fs_info *fs_info = root->fs_info;
5160 	struct btrfs_trans_handle *trans;
5161 	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
5162 	int ret;
5163 
5164 	/*
5165 	 * Eviction should be taking place at some place safe because of our
5166 	 * delayed iputs.  However the normal flushing code will run delayed
5167 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5168 	 *
5169 	 * We reserve the delayed_refs_extra here again because we can't use
5170 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5171 	 * above.  We reserve our extra bit here because we generate a ton of
5172 	 * delayed refs activity by truncating.
5173 	 *
5174 	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5175 	 * if we fail to make this reservation we can re-try without the
5176 	 * delayed_refs_extra so we can make some forward progress.
5177 	 */
5178 	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5179 				     BTRFS_RESERVE_FLUSH_EVICT);
5180 	if (ret) {
5181 		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5182 					     BTRFS_RESERVE_FLUSH_EVICT);
5183 		if (ret) {
5184 			btrfs_warn(fs_info,
5185 				   "could not allocate space for delete; will truncate on mount");
5186 			return ERR_PTR(-ENOSPC);
5187 		}
5188 		delayed_refs_extra = 0;
5189 	}
5190 
5191 	trans = btrfs_join_transaction(root);
5192 	if (IS_ERR(trans))
5193 		return trans;
5194 
5195 	if (delayed_refs_extra) {
5196 		trans->block_rsv = &fs_info->trans_block_rsv;
5197 		trans->bytes_reserved = delayed_refs_extra;
5198 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5199 					delayed_refs_extra, true);
5200 	}
5201 	return trans;
5202 }
5203 
5204 void btrfs_evict_inode(struct inode *inode)
5205 {
5206 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5207 	struct btrfs_trans_handle *trans;
5208 	struct btrfs_root *root = BTRFS_I(inode)->root;
5209 	struct btrfs_block_rsv *rsv = NULL;
5210 	int ret;
5211 
5212 	trace_btrfs_inode_evict(inode);
5213 
5214 	if (!root) {
5215 		fsverity_cleanup_inode(inode);
5216 		clear_inode(inode);
5217 		return;
5218 	}
5219 
5220 	evict_inode_truncate_pages(inode);
5221 
5222 	if (inode->i_nlink &&
5223 	    ((btrfs_root_refs(&root->root_item) != 0 &&
5224 	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5225 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5226 		goto out;
5227 
5228 	if (is_bad_inode(inode))
5229 		goto out;
5230 
5231 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5232 		goto out;
5233 
5234 	if (inode->i_nlink > 0) {
5235 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5236 		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5237 		goto out;
5238 	}
5239 
5240 	/*
5241 	 * This makes sure the inode item in tree is uptodate and the space for
5242 	 * the inode update is released.
5243 	 */
5244 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5245 	if (ret)
5246 		goto out;
5247 
5248 	/*
5249 	 * This drops any pending insert or delete operations we have for this
5250 	 * inode.  We could have a delayed dir index deletion queued up, but
5251 	 * we're removing the inode completely so that'll be taken care of in
5252 	 * the truncate.
5253 	 */
5254 	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5255 
5256 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
5257 	if (!rsv)
5258 		goto out;
5259 	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
5260 	rsv->failfast = true;
5261 
5262 	btrfs_i_size_write(BTRFS_I(inode), 0);
5263 
5264 	while (1) {
5265 		struct btrfs_truncate_control control = {
5266 			.inode = BTRFS_I(inode),
5267 			.ino = btrfs_ino(BTRFS_I(inode)),
5268 			.new_size = 0,
5269 			.min_type = 0,
5270 		};
5271 
5272 		trans = evict_refill_and_join(root, rsv);
5273 		if (IS_ERR(trans))
5274 			goto out;
5275 
5276 		trans->block_rsv = rsv;
5277 
5278 		ret = btrfs_truncate_inode_items(trans, root, &control);
5279 		trans->block_rsv = &fs_info->trans_block_rsv;
5280 		btrfs_end_transaction(trans);
5281 		/*
5282 		 * We have not added new delayed items for our inode after we
5283 		 * have flushed its delayed items, so no need to throttle on
5284 		 * delayed items. However we have modified extent buffers.
5285 		 */
5286 		btrfs_btree_balance_dirty_nodelay(fs_info);
5287 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5288 			goto out;
5289 		else if (!ret)
5290 			break;
5291 	}
5292 
5293 	/*
5294 	 * Errors here aren't a big deal, it just means we leave orphan items in
5295 	 * the tree. They will be cleaned up on the next mount. If the inode
5296 	 * number gets reused, cleanup deletes the orphan item without doing
5297 	 * anything, and unlink reuses the existing orphan item.
5298 	 *
5299 	 * If it turns out that we are dropping too many of these, we might want
5300 	 * to add a mechanism for retrying these after a commit.
5301 	 */
5302 	trans = evict_refill_and_join(root, rsv);
5303 	if (!IS_ERR(trans)) {
5304 		trans->block_rsv = rsv;
5305 		btrfs_orphan_del(trans, BTRFS_I(inode));
5306 		trans->block_rsv = &fs_info->trans_block_rsv;
5307 		btrfs_end_transaction(trans);
5308 	}
5309 
5310 out:
5311 	btrfs_free_block_rsv(fs_info, rsv);
5312 	/*
5313 	 * If we didn't successfully delete, the orphan item will still be in
5314 	 * the tree and we'll retry on the next mount. Again, we might also want
5315 	 * to retry these periodically in the future.
5316 	 */
5317 	btrfs_remove_delayed_node(BTRFS_I(inode));
5318 	fsverity_cleanup_inode(inode);
5319 	clear_inode(inode);
5320 }
5321 
5322 /*
5323  * Return the key found in the dir entry in the location pointer, fill @type
5324  * with BTRFS_FT_*, and return 0.
5325  *
5326  * If no dir entries were found, returns -ENOENT.
5327  * If found a corrupted location in dir entry, returns -EUCLEAN.
5328  */
5329 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
5330 			       struct btrfs_key *location, u8 *type)
5331 {
5332 	struct btrfs_dir_item *di;
5333 	struct btrfs_path *path;
5334 	struct btrfs_root *root = dir->root;
5335 	int ret = 0;
5336 	struct fscrypt_name fname;
5337 
5338 	path = btrfs_alloc_path();
5339 	if (!path)
5340 		return -ENOMEM;
5341 
5342 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
5343 	if (ret < 0)
5344 		goto out;
5345 	/*
5346 	 * fscrypt_setup_filename() should never return a positive value, but
5347 	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
5348 	 */
5349 	ASSERT(ret == 0);
5350 
5351 	/* This needs to handle no-key deletions later on */
5352 
5353 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
5354 				   &fname.disk_name, 0);
5355 	if (IS_ERR_OR_NULL(di)) {
5356 		ret = di ? PTR_ERR(di) : -ENOENT;
5357 		goto out;
5358 	}
5359 
5360 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5361 	if (location->type != BTRFS_INODE_ITEM_KEY &&
5362 	    location->type != BTRFS_ROOT_ITEM_KEY) {
5363 		ret = -EUCLEAN;
5364 		btrfs_warn(root->fs_info,
5365 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
5366 			   __func__, fname.disk_name.name, btrfs_ino(dir),
5367 			   location->objectid, location->type, location->offset);
5368 	}
5369 	if (!ret)
5370 		*type = btrfs_dir_ftype(path->nodes[0], di);
5371 out:
5372 	fscrypt_free_filename(&fname);
5373 	btrfs_free_path(path);
5374 	return ret;
5375 }
5376 
5377 /*
5378  * when we hit a tree root in a directory, the btrfs part of the inode
5379  * needs to be changed to reflect the root directory of the tree root.  This
5380  * is kind of like crossing a mount point.
5381  */
5382 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5383 				    struct btrfs_inode *dir,
5384 				    struct dentry *dentry,
5385 				    struct btrfs_key *location,
5386 				    struct btrfs_root **sub_root)
5387 {
5388 	struct btrfs_path *path;
5389 	struct btrfs_root *new_root;
5390 	struct btrfs_root_ref *ref;
5391 	struct extent_buffer *leaf;
5392 	struct btrfs_key key;
5393 	int ret;
5394 	int err = 0;
5395 	struct fscrypt_name fname;
5396 
5397 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
5398 	if (ret)
5399 		return ret;
5400 
5401 	path = btrfs_alloc_path();
5402 	if (!path) {
5403 		err = -ENOMEM;
5404 		goto out;
5405 	}
5406 
5407 	err = -ENOENT;
5408 	key.objectid = dir->root->root_key.objectid;
5409 	key.type = BTRFS_ROOT_REF_KEY;
5410 	key.offset = location->objectid;
5411 
5412 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5413 	if (ret) {
5414 		if (ret < 0)
5415 			err = ret;
5416 		goto out;
5417 	}
5418 
5419 	leaf = path->nodes[0];
5420 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5421 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5422 	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
5423 		goto out;
5424 
5425 	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
5426 				   (unsigned long)(ref + 1), fname.disk_name.len);
5427 	if (ret)
5428 		goto out;
5429 
5430 	btrfs_release_path(path);
5431 
5432 	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5433 	if (IS_ERR(new_root)) {
5434 		err = PTR_ERR(new_root);
5435 		goto out;
5436 	}
5437 
5438 	*sub_root = new_root;
5439 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5440 	location->type = BTRFS_INODE_ITEM_KEY;
5441 	location->offset = 0;
5442 	err = 0;
5443 out:
5444 	btrfs_free_path(path);
5445 	fscrypt_free_filename(&fname);
5446 	return err;
5447 }
5448 
5449 static void inode_tree_add(struct btrfs_inode *inode)
5450 {
5451 	struct btrfs_root *root = inode->root;
5452 	struct btrfs_inode *entry;
5453 	struct rb_node **p;
5454 	struct rb_node *parent;
5455 	struct rb_node *new = &inode->rb_node;
5456 	u64 ino = btrfs_ino(inode);
5457 
5458 	if (inode_unhashed(&inode->vfs_inode))
5459 		return;
5460 	parent = NULL;
5461 	spin_lock(&root->inode_lock);
5462 	p = &root->inode_tree.rb_node;
5463 	while (*p) {
5464 		parent = *p;
5465 		entry = rb_entry(parent, struct btrfs_inode, rb_node);
5466 
5467 		if (ino < btrfs_ino(entry))
5468 			p = &parent->rb_left;
5469 		else if (ino > btrfs_ino(entry))
5470 			p = &parent->rb_right;
5471 		else {
5472 			WARN_ON(!(entry->vfs_inode.i_state &
5473 				  (I_WILL_FREE | I_FREEING)));
5474 			rb_replace_node(parent, new, &root->inode_tree);
5475 			RB_CLEAR_NODE(parent);
5476 			spin_unlock(&root->inode_lock);
5477 			return;
5478 		}
5479 	}
5480 	rb_link_node(new, parent, p);
5481 	rb_insert_color(new, &root->inode_tree);
5482 	spin_unlock(&root->inode_lock);
5483 }
5484 
5485 static void inode_tree_del(struct btrfs_inode *inode)
5486 {
5487 	struct btrfs_root *root = inode->root;
5488 	int empty = 0;
5489 
5490 	spin_lock(&root->inode_lock);
5491 	if (!RB_EMPTY_NODE(&inode->rb_node)) {
5492 		rb_erase(&inode->rb_node, &root->inode_tree);
5493 		RB_CLEAR_NODE(&inode->rb_node);
5494 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5495 	}
5496 	spin_unlock(&root->inode_lock);
5497 
5498 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5499 		spin_lock(&root->inode_lock);
5500 		empty = RB_EMPTY_ROOT(&root->inode_tree);
5501 		spin_unlock(&root->inode_lock);
5502 		if (empty)
5503 			btrfs_add_dead_root(root);
5504 	}
5505 }
5506 
5507 
5508 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5509 {
5510 	struct btrfs_iget_args *args = p;
5511 
5512 	inode->i_ino = args->ino;
5513 	BTRFS_I(inode)->location.objectid = args->ino;
5514 	BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5515 	BTRFS_I(inode)->location.offset = 0;
5516 	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5517 	BUG_ON(args->root && !BTRFS_I(inode)->root);
5518 
5519 	if (args->root && args->root == args->root->fs_info->tree_root &&
5520 	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
5521 		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5522 			&BTRFS_I(inode)->runtime_flags);
5523 	return 0;
5524 }
5525 
5526 static int btrfs_find_actor(struct inode *inode, void *opaque)
5527 {
5528 	struct btrfs_iget_args *args = opaque;
5529 
5530 	return args->ino == BTRFS_I(inode)->location.objectid &&
5531 		args->root == BTRFS_I(inode)->root;
5532 }
5533 
5534 static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
5535 				       struct btrfs_root *root)
5536 {
5537 	struct inode *inode;
5538 	struct btrfs_iget_args args;
5539 	unsigned long hashval = btrfs_inode_hash(ino, root);
5540 
5541 	args.ino = ino;
5542 	args.root = root;
5543 
5544 	inode = iget5_locked(s, hashval, btrfs_find_actor,
5545 			     btrfs_init_locked_inode,
5546 			     (void *)&args);
5547 	return inode;
5548 }
5549 
5550 /*
5551  * Get an inode object given its inode number and corresponding root.
5552  * Path can be preallocated to prevent recursing back to iget through
5553  * allocator. NULL is also valid but may require an additional allocation
5554  * later.
5555  */
5556 struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
5557 			      struct btrfs_root *root, struct btrfs_path *path)
5558 {
5559 	struct inode *inode;
5560 
5561 	inode = btrfs_iget_locked(s, ino, root);
5562 	if (!inode)
5563 		return ERR_PTR(-ENOMEM);
5564 
5565 	if (inode->i_state & I_NEW) {
5566 		int ret;
5567 
5568 		ret = btrfs_read_locked_inode(inode, path);
5569 		if (!ret) {
5570 			inode_tree_add(BTRFS_I(inode));
5571 			unlock_new_inode(inode);
5572 		} else {
5573 			iget_failed(inode);
5574 			/*
5575 			 * ret > 0 can come from btrfs_search_slot called by
5576 			 * btrfs_read_locked_inode, this means the inode item
5577 			 * was not found.
5578 			 */
5579 			if (ret > 0)
5580 				ret = -ENOENT;
5581 			inode = ERR_PTR(ret);
5582 		}
5583 	}
5584 
5585 	return inode;
5586 }
5587 
5588 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
5589 {
5590 	return btrfs_iget_path(s, ino, root, NULL);
5591 }
5592 
5593 static struct inode *new_simple_dir(struct inode *dir,
5594 				    struct btrfs_key *key,
5595 				    struct btrfs_root *root)
5596 {
5597 	struct timespec64 ts;
5598 	struct inode *inode = new_inode(dir->i_sb);
5599 
5600 	if (!inode)
5601 		return ERR_PTR(-ENOMEM);
5602 
5603 	BTRFS_I(inode)->root = btrfs_grab_root(root);
5604 	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5605 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5606 
5607 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5608 	/*
5609 	 * We only need lookup, the rest is read-only and there's no inode
5610 	 * associated with the dentry
5611 	 */
5612 	inode->i_op = &simple_dir_inode_operations;
5613 	inode->i_opflags &= ~IOP_XATTR;
5614 	inode->i_fop = &simple_dir_operations;
5615 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5616 
5617 	ts = inode_set_ctime_current(inode);
5618 	inode_set_mtime_to_ts(inode, ts);
5619 	inode_set_atime_to_ts(inode, inode_get_atime(dir));
5620 	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
5621 	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
5622 
5623 	inode->i_uid = dir->i_uid;
5624 	inode->i_gid = dir->i_gid;
5625 
5626 	return inode;
5627 }
5628 
5629 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
5630 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
5631 static_assert(BTRFS_FT_DIR == FT_DIR);
5632 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
5633 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
5634 static_assert(BTRFS_FT_FIFO == FT_FIFO);
5635 static_assert(BTRFS_FT_SOCK == FT_SOCK);
5636 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
5637 
5638 static inline u8 btrfs_inode_type(struct inode *inode)
5639 {
5640 	return fs_umode_to_ftype(inode->i_mode);
5641 }
5642 
5643 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5644 {
5645 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
5646 	struct inode *inode;
5647 	struct btrfs_root *root = BTRFS_I(dir)->root;
5648 	struct btrfs_root *sub_root = root;
5649 	struct btrfs_key location;
5650 	u8 di_type = 0;
5651 	int ret = 0;
5652 
5653 	if (dentry->d_name.len > BTRFS_NAME_LEN)
5654 		return ERR_PTR(-ENAMETOOLONG);
5655 
5656 	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
5657 	if (ret < 0)
5658 		return ERR_PTR(ret);
5659 
5660 	if (location.type == BTRFS_INODE_ITEM_KEY) {
5661 		inode = btrfs_iget(dir->i_sb, location.objectid, root);
5662 		if (IS_ERR(inode))
5663 			return inode;
5664 
5665 		/* Do extra check against inode mode with di_type */
5666 		if (btrfs_inode_type(inode) != di_type) {
5667 			btrfs_crit(fs_info,
5668 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
5669 				  inode->i_mode, btrfs_inode_type(inode),
5670 				  di_type);
5671 			iput(inode);
5672 			return ERR_PTR(-EUCLEAN);
5673 		}
5674 		return inode;
5675 	}
5676 
5677 	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
5678 				       &location, &sub_root);
5679 	if (ret < 0) {
5680 		if (ret != -ENOENT)
5681 			inode = ERR_PTR(ret);
5682 		else
5683 			inode = new_simple_dir(dir, &location, root);
5684 	} else {
5685 		inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
5686 		btrfs_put_root(sub_root);
5687 
5688 		if (IS_ERR(inode))
5689 			return inode;
5690 
5691 		down_read(&fs_info->cleanup_work_sem);
5692 		if (!sb_rdonly(inode->i_sb))
5693 			ret = btrfs_orphan_cleanup(sub_root);
5694 		up_read(&fs_info->cleanup_work_sem);
5695 		if (ret) {
5696 			iput(inode);
5697 			inode = ERR_PTR(ret);
5698 		}
5699 	}
5700 
5701 	return inode;
5702 }
5703 
5704 static int btrfs_dentry_delete(const struct dentry *dentry)
5705 {
5706 	struct btrfs_root *root;
5707 	struct inode *inode = d_inode(dentry);
5708 
5709 	if (!inode && !IS_ROOT(dentry))
5710 		inode = d_inode(dentry->d_parent);
5711 
5712 	if (inode) {
5713 		root = BTRFS_I(inode)->root;
5714 		if (btrfs_root_refs(&root->root_item) == 0)
5715 			return 1;
5716 
5717 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5718 			return 1;
5719 	}
5720 	return 0;
5721 }
5722 
5723 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5724 				   unsigned int flags)
5725 {
5726 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
5727 
5728 	if (inode == ERR_PTR(-ENOENT))
5729 		inode = NULL;
5730 	return d_splice_alias(inode, dentry);
5731 }
5732 
5733 /*
5734  * Find the highest existing sequence number in a directory and then set the
5735  * in-memory index_cnt variable to the first free sequence number.
5736  */
5737 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
5738 {
5739 	struct btrfs_root *root = inode->root;
5740 	struct btrfs_key key, found_key;
5741 	struct btrfs_path *path;
5742 	struct extent_buffer *leaf;
5743 	int ret;
5744 
5745 	key.objectid = btrfs_ino(inode);
5746 	key.type = BTRFS_DIR_INDEX_KEY;
5747 	key.offset = (u64)-1;
5748 
5749 	path = btrfs_alloc_path();
5750 	if (!path)
5751 		return -ENOMEM;
5752 
5753 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5754 	if (ret < 0)
5755 		goto out;
5756 	/* FIXME: we should be able to handle this */
5757 	if (ret == 0)
5758 		goto out;
5759 	ret = 0;
5760 
5761 	if (path->slots[0] == 0) {
5762 		inode->index_cnt = BTRFS_DIR_START_INDEX;
5763 		goto out;
5764 	}
5765 
5766 	path->slots[0]--;
5767 
5768 	leaf = path->nodes[0];
5769 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5770 
5771 	if (found_key.objectid != btrfs_ino(inode) ||
5772 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
5773 		inode->index_cnt = BTRFS_DIR_START_INDEX;
5774 		goto out;
5775 	}
5776 
5777 	inode->index_cnt = found_key.offset + 1;
5778 out:
5779 	btrfs_free_path(path);
5780 	return ret;
5781 }
5782 
5783 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
5784 {
5785 	int ret = 0;
5786 
5787 	btrfs_inode_lock(dir, 0);
5788 	if (dir->index_cnt == (u64)-1) {
5789 		ret = btrfs_inode_delayed_dir_index_count(dir);
5790 		if (ret) {
5791 			ret = btrfs_set_inode_index_count(dir);
5792 			if (ret)
5793 				goto out;
5794 		}
5795 	}
5796 
5797 	/* index_cnt is the index number of next new entry, so decrement it. */
5798 	*index = dir->index_cnt - 1;
5799 out:
5800 	btrfs_inode_unlock(dir, 0);
5801 
5802 	return ret;
5803 }
5804 
5805 /*
5806  * All this infrastructure exists because dir_emit can fault, and we are holding
5807  * the tree lock when doing readdir.  For now just allocate a buffer and copy
5808  * our information into that, and then dir_emit from the buffer.  This is
5809  * similar to what NFS does, only we don't keep the buffer around in pagecache
5810  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
5811  * copy_to_user_inatomic so we don't have to worry about page faulting under the
5812  * tree lock.
5813  */
5814 static int btrfs_opendir(struct inode *inode, struct file *file)
5815 {
5816 	struct btrfs_file_private *private;
5817 	u64 last_index;
5818 	int ret;
5819 
5820 	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
5821 	if (ret)
5822 		return ret;
5823 
5824 	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
5825 	if (!private)
5826 		return -ENOMEM;
5827 	private->last_index = last_index;
5828 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
5829 	if (!private->filldir_buf) {
5830 		kfree(private);
5831 		return -ENOMEM;
5832 	}
5833 	file->private_data = private;
5834 	return 0;
5835 }
5836 
5837 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
5838 {
5839 	struct btrfs_file_private *private = file->private_data;
5840 	int ret;
5841 
5842 	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
5843 				       &private->last_index);
5844 	if (ret)
5845 		return ret;
5846 
5847 	return generic_file_llseek(file, offset, whence);
5848 }
5849 
5850 struct dir_entry {
5851 	u64 ino;
5852 	u64 offset;
5853 	unsigned type;
5854 	int name_len;
5855 };
5856 
5857 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
5858 {
5859 	while (entries--) {
5860 		struct dir_entry *entry = addr;
5861 		char *name = (char *)(entry + 1);
5862 
5863 		ctx->pos = get_unaligned(&entry->offset);
5864 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
5865 					 get_unaligned(&entry->ino),
5866 					 get_unaligned(&entry->type)))
5867 			return 1;
5868 		addr += sizeof(struct dir_entry) +
5869 			get_unaligned(&entry->name_len);
5870 		ctx->pos++;
5871 	}
5872 	return 0;
5873 }
5874 
5875 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5876 {
5877 	struct inode *inode = file_inode(file);
5878 	struct btrfs_root *root = BTRFS_I(inode)->root;
5879 	struct btrfs_file_private *private = file->private_data;
5880 	struct btrfs_dir_item *di;
5881 	struct btrfs_key key;
5882 	struct btrfs_key found_key;
5883 	struct btrfs_path *path;
5884 	void *addr;
5885 	LIST_HEAD(ins_list);
5886 	LIST_HEAD(del_list);
5887 	int ret;
5888 	char *name_ptr;
5889 	int name_len;
5890 	int entries = 0;
5891 	int total_len = 0;
5892 	bool put = false;
5893 	struct btrfs_key location;
5894 
5895 	if (!dir_emit_dots(file, ctx))
5896 		return 0;
5897 
5898 	path = btrfs_alloc_path();
5899 	if (!path)
5900 		return -ENOMEM;
5901 
5902 	addr = private->filldir_buf;
5903 	path->reada = READA_FORWARD;
5904 
5905 	put = btrfs_readdir_get_delayed_items(inode, private->last_index,
5906 					      &ins_list, &del_list);
5907 
5908 again:
5909 	key.type = BTRFS_DIR_INDEX_KEY;
5910 	key.offset = ctx->pos;
5911 	key.objectid = btrfs_ino(BTRFS_I(inode));
5912 
5913 	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
5914 		struct dir_entry *entry;
5915 		struct extent_buffer *leaf = path->nodes[0];
5916 		u8 ftype;
5917 
5918 		if (found_key.objectid != key.objectid)
5919 			break;
5920 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
5921 			break;
5922 		if (found_key.offset < ctx->pos)
5923 			continue;
5924 		if (found_key.offset > private->last_index)
5925 			break;
5926 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
5927 			continue;
5928 		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
5929 		name_len = btrfs_dir_name_len(leaf, di);
5930 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
5931 		    PAGE_SIZE) {
5932 			btrfs_release_path(path);
5933 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5934 			if (ret)
5935 				goto nopos;
5936 			addr = private->filldir_buf;
5937 			entries = 0;
5938 			total_len = 0;
5939 			goto again;
5940 		}
5941 
5942 		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
5943 		entry = addr;
5944 		name_ptr = (char *)(entry + 1);
5945 		read_extent_buffer(leaf, name_ptr,
5946 				   (unsigned long)(di + 1), name_len);
5947 		put_unaligned(name_len, &entry->name_len);
5948 		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
5949 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
5950 		put_unaligned(location.objectid, &entry->ino);
5951 		put_unaligned(found_key.offset, &entry->offset);
5952 		entries++;
5953 		addr += sizeof(struct dir_entry) + name_len;
5954 		total_len += sizeof(struct dir_entry) + name_len;
5955 	}
5956 	/* Catch error encountered during iteration */
5957 	if (ret < 0)
5958 		goto err;
5959 
5960 	btrfs_release_path(path);
5961 
5962 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
5963 	if (ret)
5964 		goto nopos;
5965 
5966 	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5967 	if (ret)
5968 		goto nopos;
5969 
5970 	/*
5971 	 * Stop new entries from being returned after we return the last
5972 	 * entry.
5973 	 *
5974 	 * New directory entries are assigned a strictly increasing
5975 	 * offset.  This means that new entries created during readdir
5976 	 * are *guaranteed* to be seen in the future by that readdir.
5977 	 * This has broken buggy programs which operate on names as
5978 	 * they're returned by readdir.  Until we re-use freed offsets
5979 	 * we have this hack to stop new entries from being returned
5980 	 * under the assumption that they'll never reach this huge
5981 	 * offset.
5982 	 *
5983 	 * This is being careful not to overflow 32bit loff_t unless the
5984 	 * last entry requires it because doing so has broken 32bit apps
5985 	 * in the past.
5986 	 */
5987 	if (ctx->pos >= INT_MAX)
5988 		ctx->pos = LLONG_MAX;
5989 	else
5990 		ctx->pos = INT_MAX;
5991 nopos:
5992 	ret = 0;
5993 err:
5994 	if (put)
5995 		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
5996 	btrfs_free_path(path);
5997 	return ret;
5998 }
5999 
6000 /*
6001  * This is somewhat expensive, updating the tree every time the
6002  * inode changes.  But, it is most likely to find the inode in cache.
6003  * FIXME, needs more benchmarking...there are no reasons other than performance
6004  * to keep or drop this code.
6005  */
6006 static int btrfs_dirty_inode(struct btrfs_inode *inode)
6007 {
6008 	struct btrfs_root *root = inode->root;
6009 	struct btrfs_fs_info *fs_info = root->fs_info;
6010 	struct btrfs_trans_handle *trans;
6011 	int ret;
6012 
6013 	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
6014 		return 0;
6015 
6016 	trans = btrfs_join_transaction(root);
6017 	if (IS_ERR(trans))
6018 		return PTR_ERR(trans);
6019 
6020 	ret = btrfs_update_inode(trans, inode);
6021 	if (ret == -ENOSPC || ret == -EDQUOT) {
6022 		/* whoops, lets try again with the full transaction */
6023 		btrfs_end_transaction(trans);
6024 		trans = btrfs_start_transaction(root, 1);
6025 		if (IS_ERR(trans))
6026 			return PTR_ERR(trans);
6027 
6028 		ret = btrfs_update_inode(trans, inode);
6029 	}
6030 	btrfs_end_transaction(trans);
6031 	if (inode->delayed_node)
6032 		btrfs_balance_delayed_items(fs_info);
6033 
6034 	return ret;
6035 }
6036 
6037 /*
6038  * This is a copy of file_update_time.  We need this so we can return error on
6039  * ENOSPC for updating the inode in the case of file write and mmap writes.
6040  */
6041 static int btrfs_update_time(struct inode *inode, int flags)
6042 {
6043 	struct btrfs_root *root = BTRFS_I(inode)->root;
6044 	bool dirty;
6045 
6046 	if (btrfs_root_readonly(root))
6047 		return -EROFS;
6048 
6049 	dirty = inode_update_timestamps(inode, flags);
6050 	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
6051 }
6052 
6053 /*
6054  * helper to find a free sequence number in a given directory.  This current
6055  * code is very simple, later versions will do smarter things in the btree
6056  */
6057 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6058 {
6059 	int ret = 0;
6060 
6061 	if (dir->index_cnt == (u64)-1) {
6062 		ret = btrfs_inode_delayed_dir_index_count(dir);
6063 		if (ret) {
6064 			ret = btrfs_set_inode_index_count(dir);
6065 			if (ret)
6066 				return ret;
6067 		}
6068 	}
6069 
6070 	*index = dir->index_cnt;
6071 	dir->index_cnt++;
6072 
6073 	return ret;
6074 }
6075 
6076 static int btrfs_insert_inode_locked(struct inode *inode)
6077 {
6078 	struct btrfs_iget_args args;
6079 
6080 	args.ino = BTRFS_I(inode)->location.objectid;
6081 	args.root = BTRFS_I(inode)->root;
6082 
6083 	return insert_inode_locked4(inode,
6084 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6085 		   btrfs_find_actor, &args);
6086 }
6087 
6088 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6089 			    unsigned int *trans_num_items)
6090 {
6091 	struct inode *dir = args->dir;
6092 	struct inode *inode = args->inode;
6093 	int ret;
6094 
6095 	if (!args->orphan) {
6096 		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
6097 					     &args->fname);
6098 		if (ret)
6099 			return ret;
6100 	}
6101 
6102 	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6103 	if (ret) {
6104 		fscrypt_free_filename(&args->fname);
6105 		return ret;
6106 	}
6107 
6108 	/* 1 to add inode item */
6109 	*trans_num_items = 1;
6110 	/* 1 to add compression property */
6111 	if (BTRFS_I(dir)->prop_compress)
6112 		(*trans_num_items)++;
6113 	/* 1 to add default ACL xattr */
6114 	if (args->default_acl)
6115 		(*trans_num_items)++;
6116 	/* 1 to add access ACL xattr */
6117 	if (args->acl)
6118 		(*trans_num_items)++;
6119 #ifdef CONFIG_SECURITY
6120 	/* 1 to add LSM xattr */
6121 	if (dir->i_security)
6122 		(*trans_num_items)++;
6123 #endif
6124 	if (args->orphan) {
6125 		/* 1 to add orphan item */
6126 		(*trans_num_items)++;
6127 	} else {
6128 		/*
6129 		 * 1 to add dir item
6130 		 * 1 to add dir index
6131 		 * 1 to update parent inode item
6132 		 *
6133 		 * No need for 1 unit for the inode ref item because it is
6134 		 * inserted in a batch together with the inode item at
6135 		 * btrfs_create_new_inode().
6136 		 */
6137 		*trans_num_items += 3;
6138 	}
6139 	return 0;
6140 }
6141 
6142 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6143 {
6144 	posix_acl_release(args->acl);
6145 	posix_acl_release(args->default_acl);
6146 	fscrypt_free_filename(&args->fname);
6147 }
6148 
6149 /*
6150  * Inherit flags from the parent inode.
6151  *
6152  * Currently only the compression flags and the cow flags are inherited.
6153  */
6154 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
6155 {
6156 	unsigned int flags;
6157 
6158 	flags = dir->flags;
6159 
6160 	if (flags & BTRFS_INODE_NOCOMPRESS) {
6161 		inode->flags &= ~BTRFS_INODE_COMPRESS;
6162 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
6163 	} else if (flags & BTRFS_INODE_COMPRESS) {
6164 		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
6165 		inode->flags |= BTRFS_INODE_COMPRESS;
6166 	}
6167 
6168 	if (flags & BTRFS_INODE_NODATACOW) {
6169 		inode->flags |= BTRFS_INODE_NODATACOW;
6170 		if (S_ISREG(inode->vfs_inode.i_mode))
6171 			inode->flags |= BTRFS_INODE_NODATASUM;
6172 	}
6173 
6174 	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
6175 }
6176 
6177 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6178 			   struct btrfs_new_inode_args *args)
6179 {
6180 	struct timespec64 ts;
6181 	struct inode *dir = args->dir;
6182 	struct inode *inode = args->inode;
6183 	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6184 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6185 	struct btrfs_root *root;
6186 	struct btrfs_inode_item *inode_item;
6187 	struct btrfs_key *location;
6188 	struct btrfs_path *path;
6189 	u64 objectid;
6190 	struct btrfs_inode_ref *ref;
6191 	struct btrfs_key key[2];
6192 	u32 sizes[2];
6193 	struct btrfs_item_batch batch;
6194 	unsigned long ptr;
6195 	int ret;
6196 
6197 	path = btrfs_alloc_path();
6198 	if (!path)
6199 		return -ENOMEM;
6200 
6201 	if (!args->subvol)
6202 		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6203 	root = BTRFS_I(inode)->root;
6204 
6205 	ret = btrfs_get_free_objectid(root, &objectid);
6206 	if (ret)
6207 		goto out;
6208 	inode->i_ino = objectid;
6209 
6210 	if (args->orphan) {
6211 		/*
6212 		 * O_TMPFILE, set link count to 0, so that after this point, we
6213 		 * fill in an inode item with the correct link count.
6214 		 */
6215 		set_nlink(inode, 0);
6216 	} else {
6217 		trace_btrfs_inode_request(dir);
6218 
6219 		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6220 		if (ret)
6221 			goto out;
6222 	}
6223 	/* index_cnt is ignored for everything but a dir. */
6224 	BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6225 	BTRFS_I(inode)->generation = trans->transid;
6226 	inode->i_generation = BTRFS_I(inode)->generation;
6227 
6228 	/*
6229 	 * We don't have any capability xattrs set here yet, shortcut any
6230 	 * queries for the xattrs here.  If we add them later via the inode
6231 	 * security init path or any other path this flag will be cleared.
6232 	 */
6233 	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
6234 
6235 	/*
6236 	 * Subvolumes don't inherit flags from their parent directory.
6237 	 * Originally this was probably by accident, but we probably can't
6238 	 * change it now without compatibility issues.
6239 	 */
6240 	if (!args->subvol)
6241 		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6242 
6243 	if (S_ISREG(inode->i_mode)) {
6244 		if (btrfs_test_opt(fs_info, NODATASUM))
6245 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6246 		if (btrfs_test_opt(fs_info, NODATACOW))
6247 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6248 				BTRFS_INODE_NODATASUM;
6249 	}
6250 
6251 	location = &BTRFS_I(inode)->location;
6252 	location->objectid = objectid;
6253 	location->offset = 0;
6254 	location->type = BTRFS_INODE_ITEM_KEY;
6255 
6256 	ret = btrfs_insert_inode_locked(inode);
6257 	if (ret < 0) {
6258 		if (!args->orphan)
6259 			BTRFS_I(dir)->index_cnt--;
6260 		goto out;
6261 	}
6262 
6263 	/*
6264 	 * We could have gotten an inode number from somebody who was fsynced
6265 	 * and then removed in this same transaction, so let's just set full
6266 	 * sync since it will be a full sync anyway and this will blow away the
6267 	 * old info in the log.
6268 	 */
6269 	btrfs_set_inode_full_sync(BTRFS_I(inode));
6270 
6271 	key[0].objectid = objectid;
6272 	key[0].type = BTRFS_INODE_ITEM_KEY;
6273 	key[0].offset = 0;
6274 
6275 	sizes[0] = sizeof(struct btrfs_inode_item);
6276 
6277 	if (!args->orphan) {
6278 		/*
6279 		 * Start new inodes with an inode_ref. This is slightly more
6280 		 * efficient for small numbers of hard links since they will
6281 		 * be packed into one item. Extended refs will kick in if we
6282 		 * add more hard links than can fit in the ref item.
6283 		 */
6284 		key[1].objectid = objectid;
6285 		key[1].type = BTRFS_INODE_REF_KEY;
6286 		if (args->subvol) {
6287 			key[1].offset = objectid;
6288 			sizes[1] = 2 + sizeof(*ref);
6289 		} else {
6290 			key[1].offset = btrfs_ino(BTRFS_I(dir));
6291 			sizes[1] = name->len + sizeof(*ref);
6292 		}
6293 	}
6294 
6295 	batch.keys = &key[0];
6296 	batch.data_sizes = &sizes[0];
6297 	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6298 	batch.nr = args->orphan ? 1 : 2;
6299 	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6300 	if (ret != 0) {
6301 		btrfs_abort_transaction(trans, ret);
6302 		goto discard;
6303 	}
6304 
6305 	ts = simple_inode_init_ts(inode);
6306 	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
6307 	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
6308 
6309 	/*
6310 	 * We're going to fill the inode item now, so at this point the inode
6311 	 * must be fully initialized.
6312 	 */
6313 
6314 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6315 				  struct btrfs_inode_item);
6316 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6317 			     sizeof(*inode_item));
6318 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6319 
6320 	if (!args->orphan) {
6321 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6322 				     struct btrfs_inode_ref);
6323 		ptr = (unsigned long)(ref + 1);
6324 		if (args->subvol) {
6325 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6326 			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6327 			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6328 		} else {
6329 			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6330 						     name->len);
6331 			btrfs_set_inode_ref_index(path->nodes[0], ref,
6332 						  BTRFS_I(inode)->dir_index);
6333 			write_extent_buffer(path->nodes[0], name->name, ptr,
6334 					    name->len);
6335 		}
6336 	}
6337 
6338 	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
6339 	/*
6340 	 * We don't need the path anymore, plus inheriting properties, adding
6341 	 * ACLs, security xattrs, orphan item or adding the link, will result in
6342 	 * allocating yet another path. So just free our path.
6343 	 */
6344 	btrfs_free_path(path);
6345 	path = NULL;
6346 
6347 	if (args->subvol) {
6348 		struct inode *parent;
6349 
6350 		/*
6351 		 * Subvolumes inherit properties from their parent subvolume,
6352 		 * not the directory they were created in.
6353 		 */
6354 		parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID,
6355 				    BTRFS_I(dir)->root);
6356 		if (IS_ERR(parent)) {
6357 			ret = PTR_ERR(parent);
6358 		} else {
6359 			ret = btrfs_inode_inherit_props(trans, inode, parent);
6360 			iput(parent);
6361 		}
6362 	} else {
6363 		ret = btrfs_inode_inherit_props(trans, inode, dir);
6364 	}
6365 	if (ret) {
6366 		btrfs_err(fs_info,
6367 			  "error inheriting props for ino %llu (root %llu): %d",
6368 			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
6369 			  ret);
6370 	}
6371 
6372 	/*
6373 	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6374 	 * probably a bug.
6375 	 */
6376 	if (!args->subvol) {
6377 		ret = btrfs_init_inode_security(trans, args);
6378 		if (ret) {
6379 			btrfs_abort_transaction(trans, ret);
6380 			goto discard;
6381 		}
6382 	}
6383 
6384 	inode_tree_add(BTRFS_I(inode));
6385 
6386 	trace_btrfs_inode_new(inode);
6387 	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6388 
6389 	btrfs_update_root_times(trans, root);
6390 
6391 	if (args->orphan) {
6392 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6393 	} else {
6394 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6395 				     0, BTRFS_I(inode)->dir_index);
6396 	}
6397 	if (ret) {
6398 		btrfs_abort_transaction(trans, ret);
6399 		goto discard;
6400 	}
6401 
6402 	return 0;
6403 
6404 discard:
6405 	/*
6406 	 * discard_new_inode() calls iput(), but the caller owns the reference
6407 	 * to the inode.
6408 	 */
6409 	ihold(inode);
6410 	discard_new_inode(inode);
6411 out:
6412 	btrfs_free_path(path);
6413 	return ret;
6414 }
6415 
6416 /*
6417  * utility function to add 'inode' into 'parent_inode' with
6418  * a give name and a given sequence number.
6419  * if 'add_backref' is true, also insert a backref from the
6420  * inode to the parent directory.
6421  */
6422 int btrfs_add_link(struct btrfs_trans_handle *trans,
6423 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6424 		   const struct fscrypt_str *name, int add_backref, u64 index)
6425 {
6426 	int ret = 0;
6427 	struct btrfs_key key;
6428 	struct btrfs_root *root = parent_inode->root;
6429 	u64 ino = btrfs_ino(inode);
6430 	u64 parent_ino = btrfs_ino(parent_inode);
6431 
6432 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6433 		memcpy(&key, &inode->root->root_key, sizeof(key));
6434 	} else {
6435 		key.objectid = ino;
6436 		key.type = BTRFS_INODE_ITEM_KEY;
6437 		key.offset = 0;
6438 	}
6439 
6440 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6441 		ret = btrfs_add_root_ref(trans, key.objectid,
6442 					 root->root_key.objectid, parent_ino,
6443 					 index, name);
6444 	} else if (add_backref) {
6445 		ret = btrfs_insert_inode_ref(trans, root, name,
6446 					     ino, parent_ino, index);
6447 	}
6448 
6449 	/* Nothing to clean up yet */
6450 	if (ret)
6451 		return ret;
6452 
6453 	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
6454 				    btrfs_inode_type(&inode->vfs_inode), index);
6455 	if (ret == -EEXIST || ret == -EOVERFLOW)
6456 		goto fail_dir_item;
6457 	else if (ret) {
6458 		btrfs_abort_transaction(trans, ret);
6459 		return ret;
6460 	}
6461 
6462 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6463 			   name->len * 2);
6464 	inode_inc_iversion(&parent_inode->vfs_inode);
6465 	/*
6466 	 * If we are replaying a log tree, we do not want to update the mtime
6467 	 * and ctime of the parent directory with the current time, since the
6468 	 * log replay procedure is responsible for setting them to their correct
6469 	 * values (the ones it had when the fsync was done).
6470 	 */
6471 	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
6472 		inode_set_mtime_to_ts(&parent_inode->vfs_inode,
6473 				      inode_set_ctime_current(&parent_inode->vfs_inode));
6474 
6475 	ret = btrfs_update_inode(trans, parent_inode);
6476 	if (ret)
6477 		btrfs_abort_transaction(trans, ret);
6478 	return ret;
6479 
6480 fail_dir_item:
6481 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6482 		u64 local_index;
6483 		int err;
6484 		err = btrfs_del_root_ref(trans, key.objectid,
6485 					 root->root_key.objectid, parent_ino,
6486 					 &local_index, name);
6487 		if (err)
6488 			btrfs_abort_transaction(trans, err);
6489 	} else if (add_backref) {
6490 		u64 local_index;
6491 		int err;
6492 
6493 		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
6494 					  &local_index);
6495 		if (err)
6496 			btrfs_abort_transaction(trans, err);
6497 	}
6498 
6499 	/* Return the original error code */
6500 	return ret;
6501 }
6502 
6503 static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6504 			       struct inode *inode)
6505 {
6506 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
6507 	struct btrfs_root *root = BTRFS_I(dir)->root;
6508 	struct btrfs_new_inode_args new_inode_args = {
6509 		.dir = dir,
6510 		.dentry = dentry,
6511 		.inode = inode,
6512 	};
6513 	unsigned int trans_num_items;
6514 	struct btrfs_trans_handle *trans;
6515 	int err;
6516 
6517 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6518 	if (err)
6519 		goto out_inode;
6520 
6521 	trans = btrfs_start_transaction(root, trans_num_items);
6522 	if (IS_ERR(trans)) {
6523 		err = PTR_ERR(trans);
6524 		goto out_new_inode_args;
6525 	}
6526 
6527 	err = btrfs_create_new_inode(trans, &new_inode_args);
6528 	if (!err)
6529 		d_instantiate_new(dentry, inode);
6530 
6531 	btrfs_end_transaction(trans);
6532 	btrfs_btree_balance_dirty(fs_info);
6533 out_new_inode_args:
6534 	btrfs_new_inode_args_destroy(&new_inode_args);
6535 out_inode:
6536 	if (err)
6537 		iput(inode);
6538 	return err;
6539 }
6540 
6541 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
6542 		       struct dentry *dentry, umode_t mode, dev_t rdev)
6543 {
6544 	struct inode *inode;
6545 
6546 	inode = new_inode(dir->i_sb);
6547 	if (!inode)
6548 		return -ENOMEM;
6549 	inode_init_owner(idmap, inode, dir, mode);
6550 	inode->i_op = &btrfs_special_inode_operations;
6551 	init_special_inode(inode, inode->i_mode, rdev);
6552 	return btrfs_create_common(dir, dentry, inode);
6553 }
6554 
6555 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
6556 			struct dentry *dentry, umode_t mode, bool excl)
6557 {
6558 	struct inode *inode;
6559 
6560 	inode = new_inode(dir->i_sb);
6561 	if (!inode)
6562 		return -ENOMEM;
6563 	inode_init_owner(idmap, inode, dir, mode);
6564 	inode->i_fop = &btrfs_file_operations;
6565 	inode->i_op = &btrfs_file_inode_operations;
6566 	inode->i_mapping->a_ops = &btrfs_aops;
6567 	return btrfs_create_common(dir, dentry, inode);
6568 }
6569 
6570 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6571 		      struct dentry *dentry)
6572 {
6573 	struct btrfs_trans_handle *trans = NULL;
6574 	struct btrfs_root *root = BTRFS_I(dir)->root;
6575 	struct inode *inode = d_inode(old_dentry);
6576 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
6577 	struct fscrypt_name fname;
6578 	u64 index;
6579 	int err;
6580 	int drop_inode = 0;
6581 
6582 	/* do not allow sys_link's with other subvols of the same device */
6583 	if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
6584 		return -EXDEV;
6585 
6586 	if (inode->i_nlink >= BTRFS_LINK_MAX)
6587 		return -EMLINK;
6588 
6589 	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
6590 	if (err)
6591 		goto fail;
6592 
6593 	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6594 	if (err)
6595 		goto fail;
6596 
6597 	/*
6598 	 * 2 items for inode and inode ref
6599 	 * 2 items for dir items
6600 	 * 1 item for parent inode
6601 	 * 1 item for orphan item deletion if O_TMPFILE
6602 	 */
6603 	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6604 	if (IS_ERR(trans)) {
6605 		err = PTR_ERR(trans);
6606 		trans = NULL;
6607 		goto fail;
6608 	}
6609 
6610 	/* There are several dir indexes for this inode, clear the cache. */
6611 	BTRFS_I(inode)->dir_index = 0ULL;
6612 	inc_nlink(inode);
6613 	inode_inc_iversion(inode);
6614 	inode_set_ctime_current(inode);
6615 	ihold(inode);
6616 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6617 
6618 	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6619 			     &fname.disk_name, 1, index);
6620 
6621 	if (err) {
6622 		drop_inode = 1;
6623 	} else {
6624 		struct dentry *parent = dentry->d_parent;
6625 
6626 		err = btrfs_update_inode(trans, BTRFS_I(inode));
6627 		if (err)
6628 			goto fail;
6629 		if (inode->i_nlink == 1) {
6630 			/*
6631 			 * If new hard link count is 1, it's a file created
6632 			 * with open(2) O_TMPFILE flag.
6633 			 */
6634 			err = btrfs_orphan_del(trans, BTRFS_I(inode));
6635 			if (err)
6636 				goto fail;
6637 		}
6638 		d_instantiate(dentry, inode);
6639 		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
6640 	}
6641 
6642 fail:
6643 	fscrypt_free_filename(&fname);
6644 	if (trans)
6645 		btrfs_end_transaction(trans);
6646 	if (drop_inode) {
6647 		inode_dec_link_count(inode);
6648 		iput(inode);
6649 	}
6650 	btrfs_btree_balance_dirty(fs_info);
6651 	return err;
6652 }
6653 
6654 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
6655 		       struct dentry *dentry, umode_t mode)
6656 {
6657 	struct inode *inode;
6658 
6659 	inode = new_inode(dir->i_sb);
6660 	if (!inode)
6661 		return -ENOMEM;
6662 	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
6663 	inode->i_op = &btrfs_dir_inode_operations;
6664 	inode->i_fop = &btrfs_dir_file_operations;
6665 	return btrfs_create_common(dir, dentry, inode);
6666 }
6667 
6668 static noinline int uncompress_inline(struct btrfs_path *path,
6669 				      struct page *page,
6670 				      struct btrfs_file_extent_item *item)
6671 {
6672 	int ret;
6673 	struct extent_buffer *leaf = path->nodes[0];
6674 	char *tmp;
6675 	size_t max_size;
6676 	unsigned long inline_size;
6677 	unsigned long ptr;
6678 	int compress_type;
6679 
6680 	compress_type = btrfs_file_extent_compression(leaf, item);
6681 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
6682 	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
6683 	tmp = kmalloc(inline_size, GFP_NOFS);
6684 	if (!tmp)
6685 		return -ENOMEM;
6686 	ptr = btrfs_file_extent_inline_start(item);
6687 
6688 	read_extent_buffer(leaf, tmp, ptr, inline_size);
6689 
6690 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6691 	ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);
6692 
6693 	/*
6694 	 * decompression code contains a memset to fill in any space between the end
6695 	 * of the uncompressed data and the end of max_size in case the decompressed
6696 	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
6697 	 * the end of an inline extent and the beginning of the next block, so we
6698 	 * cover that region here.
6699 	 */
6700 
6701 	if (max_size < PAGE_SIZE)
6702 		memzero_page(page, max_size, PAGE_SIZE - max_size);
6703 	kfree(tmp);
6704 	return ret;
6705 }
6706 
6707 static int read_inline_extent(struct btrfs_inode *inode, struct btrfs_path *path,
6708 			      struct page *page)
6709 {
6710 	struct btrfs_file_extent_item *fi;
6711 	void *kaddr;
6712 	size_t copy_size;
6713 
6714 	if (!page || PageUptodate(page))
6715 		return 0;
6716 
6717 	ASSERT(page_offset(page) == 0);
6718 
6719 	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
6720 			    struct btrfs_file_extent_item);
6721 	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
6722 		return uncompress_inline(path, page, fi);
6723 
6724 	copy_size = min_t(u64, PAGE_SIZE,
6725 			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
6726 	kaddr = kmap_local_page(page);
6727 	read_extent_buffer(path->nodes[0], kaddr,
6728 			   btrfs_file_extent_inline_start(fi), copy_size);
6729 	kunmap_local(kaddr);
6730 	if (copy_size < PAGE_SIZE)
6731 		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
6732 	return 0;
6733 }
6734 
6735 /*
6736  * Lookup the first extent overlapping a range in a file.
6737  *
6738  * @inode:	file to search in
6739  * @page:	page to read extent data into if the extent is inline
6740  * @pg_offset:	offset into @page to copy to
6741  * @start:	file offset
6742  * @len:	length of range starting at @start
6743  *
6744  * Return the first &struct extent_map which overlaps the given range, reading
6745  * it from the B-tree and caching it if necessary. Note that there may be more
6746  * extents which overlap the given range after the returned extent_map.
6747  *
6748  * If @page is not NULL and the extent is inline, this also reads the extent
6749  * data directly into the page and marks the extent up to date in the io_tree.
6750  *
6751  * Return: ERR_PTR on error, non-NULL extent_map on success.
6752  */
6753 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6754 				    struct page *page, size_t pg_offset,
6755 				    u64 start, u64 len)
6756 {
6757 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
6758 	int ret = 0;
6759 	u64 extent_start = 0;
6760 	u64 extent_end = 0;
6761 	u64 objectid = btrfs_ino(inode);
6762 	int extent_type = -1;
6763 	struct btrfs_path *path = NULL;
6764 	struct btrfs_root *root = inode->root;
6765 	struct btrfs_file_extent_item *item;
6766 	struct extent_buffer *leaf;
6767 	struct btrfs_key found_key;
6768 	struct extent_map *em = NULL;
6769 	struct extent_map_tree *em_tree = &inode->extent_tree;
6770 
6771 	read_lock(&em_tree->lock);
6772 	em = lookup_extent_mapping(em_tree, start, len);
6773 	read_unlock(&em_tree->lock);
6774 
6775 	if (em) {
6776 		if (em->start > start || em->start + em->len <= start)
6777 			free_extent_map(em);
6778 		else if (em->block_start == EXTENT_MAP_INLINE && page)
6779 			free_extent_map(em);
6780 		else
6781 			goto out;
6782 	}
6783 	em = alloc_extent_map();
6784 	if (!em) {
6785 		ret = -ENOMEM;
6786 		goto out;
6787 	}
6788 	em->start = EXTENT_MAP_HOLE;
6789 	em->orig_start = EXTENT_MAP_HOLE;
6790 	em->len = (u64)-1;
6791 	em->block_len = (u64)-1;
6792 
6793 	path = btrfs_alloc_path();
6794 	if (!path) {
6795 		ret = -ENOMEM;
6796 		goto out;
6797 	}
6798 
6799 	/* Chances are we'll be called again, so go ahead and do readahead */
6800 	path->reada = READA_FORWARD;
6801 
6802 	/*
6803 	 * The same explanation in load_free_space_cache applies here as well,
6804 	 * we only read when we're loading the free space cache, and at that
6805 	 * point the commit_root has everything we need.
6806 	 */
6807 	if (btrfs_is_free_space_inode(inode)) {
6808 		path->search_commit_root = 1;
6809 		path->skip_locking = 1;
6810 	}
6811 
6812 	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6813 	if (ret < 0) {
6814 		goto out;
6815 	} else if (ret > 0) {
6816 		if (path->slots[0] == 0)
6817 			goto not_found;
6818 		path->slots[0]--;
6819 		ret = 0;
6820 	}
6821 
6822 	leaf = path->nodes[0];
6823 	item = btrfs_item_ptr(leaf, path->slots[0],
6824 			      struct btrfs_file_extent_item);
6825 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6826 	if (found_key.objectid != objectid ||
6827 	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
6828 		/*
6829 		 * If we backup past the first extent we want to move forward
6830 		 * and see if there is an extent in front of us, otherwise we'll
6831 		 * say there is a hole for our whole search range which can
6832 		 * cause problems.
6833 		 */
6834 		extent_end = start;
6835 		goto next;
6836 	}
6837 
6838 	extent_type = btrfs_file_extent_type(leaf, item);
6839 	extent_start = found_key.offset;
6840 	extent_end = btrfs_file_extent_end(path);
6841 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6842 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6843 		/* Only regular file could have regular/prealloc extent */
6844 		if (!S_ISREG(inode->vfs_inode.i_mode)) {
6845 			ret = -EUCLEAN;
6846 			btrfs_crit(fs_info,
6847 		"regular/prealloc extent found for non-regular inode %llu",
6848 				   btrfs_ino(inode));
6849 			goto out;
6850 		}
6851 		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6852 						       extent_start);
6853 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6854 		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6855 						      path->slots[0],
6856 						      extent_start);
6857 	}
6858 next:
6859 	if (start >= extent_end) {
6860 		path->slots[0]++;
6861 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6862 			ret = btrfs_next_leaf(root, path);
6863 			if (ret < 0)
6864 				goto out;
6865 			else if (ret > 0)
6866 				goto not_found;
6867 
6868 			leaf = path->nodes[0];
6869 		}
6870 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6871 		if (found_key.objectid != objectid ||
6872 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
6873 			goto not_found;
6874 		if (start + len <= found_key.offset)
6875 			goto not_found;
6876 		if (start > found_key.offset)
6877 			goto next;
6878 
6879 		/* New extent overlaps with existing one */
6880 		em->start = start;
6881 		em->orig_start = start;
6882 		em->len = found_key.offset - start;
6883 		em->block_start = EXTENT_MAP_HOLE;
6884 		goto insert;
6885 	}
6886 
6887 	btrfs_extent_item_to_extent_map(inode, path, item, em);
6888 
6889 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
6890 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6891 		goto insert;
6892 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6893 		/*
6894 		 * Inline extent can only exist at file offset 0. This is
6895 		 * ensured by tree-checker and inline extent creation path.
6896 		 * Thus all members representing file offsets should be zero.
6897 		 */
6898 		ASSERT(pg_offset == 0);
6899 		ASSERT(extent_start == 0);
6900 		ASSERT(em->start == 0);
6901 
6902 		/*
6903 		 * btrfs_extent_item_to_extent_map() should have properly
6904 		 * initialized em members already.
6905 		 *
6906 		 * Other members are not utilized for inline extents.
6907 		 */
6908 		ASSERT(em->block_start == EXTENT_MAP_INLINE);
6909 		ASSERT(em->len == fs_info->sectorsize);
6910 
6911 		ret = read_inline_extent(inode, path, page);
6912 		if (ret < 0)
6913 			goto out;
6914 		goto insert;
6915 	}
6916 not_found:
6917 	em->start = start;
6918 	em->orig_start = start;
6919 	em->len = len;
6920 	em->block_start = EXTENT_MAP_HOLE;
6921 insert:
6922 	ret = 0;
6923 	btrfs_release_path(path);
6924 	if (em->start > start || extent_map_end(em) <= start) {
6925 		btrfs_err(fs_info,
6926 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
6927 			  em->start, em->len, start, len);
6928 		ret = -EIO;
6929 		goto out;
6930 	}
6931 
6932 	write_lock(&em_tree->lock);
6933 	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
6934 	write_unlock(&em_tree->lock);
6935 out:
6936 	btrfs_free_path(path);
6937 
6938 	trace_btrfs_get_extent(root, inode, em);
6939 
6940 	if (ret) {
6941 		free_extent_map(em);
6942 		return ERR_PTR(ret);
6943 	}
6944 	return em;
6945 }
6946 
6947 static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode,
6948 						  struct btrfs_dio_data *dio_data,
6949 						  const u64 start,
6950 						  const u64 len,
6951 						  const u64 orig_start,
6952 						  const u64 block_start,
6953 						  const u64 block_len,
6954 						  const u64 orig_block_len,
6955 						  const u64 ram_bytes,
6956 						  const int type)
6957 {
6958 	struct extent_map *em = NULL;
6959 	struct btrfs_ordered_extent *ordered;
6960 
6961 	if (type != BTRFS_ORDERED_NOCOW) {
6962 		em = create_io_em(inode, start, len, orig_start, block_start,
6963 				  block_len, orig_block_len, ram_bytes,
6964 				  BTRFS_COMPRESS_NONE, /* compress_type */
6965 				  type);
6966 		if (IS_ERR(em))
6967 			goto out;
6968 	}
6969 	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
6970 					     block_start, block_len, 0,
6971 					     (1 << type) |
6972 					     (1 << BTRFS_ORDERED_DIRECT),
6973 					     BTRFS_COMPRESS_NONE);
6974 	if (IS_ERR(ordered)) {
6975 		if (em) {
6976 			free_extent_map(em);
6977 			btrfs_drop_extent_map_range(inode, start,
6978 						    start + len - 1, false);
6979 		}
6980 		em = ERR_CAST(ordered);
6981 	} else {
6982 		ASSERT(!dio_data->ordered);
6983 		dio_data->ordered = ordered;
6984 	}
6985  out:
6986 
6987 	return em;
6988 }
6989 
6990 static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
6991 						  struct btrfs_dio_data *dio_data,
6992 						  u64 start, u64 len)
6993 {
6994 	struct btrfs_root *root = inode->root;
6995 	struct btrfs_fs_info *fs_info = root->fs_info;
6996 	struct extent_map *em;
6997 	struct btrfs_key ins;
6998 	u64 alloc_hint;
6999 	int ret;
7000 
7001 	alloc_hint = get_extent_allocation_hint(inode, start, len);
7002 again:
7003 	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
7004 				   0, alloc_hint, &ins, 1, 1);
7005 	if (ret == -EAGAIN) {
7006 		ASSERT(btrfs_is_zoned(fs_info));
7007 		wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH,
7008 			       TASK_UNINTERRUPTIBLE);
7009 		goto again;
7010 	}
7011 	if (ret)
7012 		return ERR_PTR(ret);
7013 
7014 	em = btrfs_create_dio_extent(inode, dio_data, start, ins.offset, start,
7015 				     ins.objectid, ins.offset, ins.offset,
7016 				     ins.offset, BTRFS_ORDERED_REGULAR);
7017 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
7018 	if (IS_ERR(em))
7019 		btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset,
7020 					   1);
7021 
7022 	return em;
7023 }
7024 
7025 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7026 {
7027 	struct btrfs_block_group *block_group;
7028 	bool readonly = false;
7029 
7030 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
7031 	if (!block_group || block_group->ro)
7032 		readonly = true;
7033 	if (block_group)
7034 		btrfs_put_block_group(block_group);
7035 	return readonly;
7036 }
7037 
7038 /*
7039  * Check if we can do nocow write into the range [@offset, @offset + @len)
7040  *
7041  * @offset:	File offset
7042  * @len:	The length to write, will be updated to the nocow writeable
7043  *		range
7044  * @orig_start:	(optional) Return the original file offset of the file extent
7045  * @orig_len:	(optional) Return the original on-disk length of the file extent
7046  * @ram_bytes:	(optional) Return the ram_bytes of the file extent
7047  * @strict:	if true, omit optimizations that might force us into unnecessary
7048  *		cow. e.g., don't trust generation number.
7049  *
7050  * Return:
7051  * >0	and update @len if we can do nocow write
7052  *  0	if we can't do nocow write
7053  * <0	if error happened
7054  *
7055  * NOTE: This only checks the file extents, caller is responsible to wait for
7056  *	 any ordered extents.
7057  */
7058 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7059 			      u64 *orig_start, u64 *orig_block_len,
7060 			      u64 *ram_bytes, bool nowait, bool strict)
7061 {
7062 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7063 	struct can_nocow_file_extent_args nocow_args = { 0 };
7064 	struct btrfs_path *path;
7065 	int ret;
7066 	struct extent_buffer *leaf;
7067 	struct btrfs_root *root = BTRFS_I(inode)->root;
7068 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7069 	struct btrfs_file_extent_item *fi;
7070 	struct btrfs_key key;
7071 	int found_type;
7072 
7073 	path = btrfs_alloc_path();
7074 	if (!path)
7075 		return -ENOMEM;
7076 	path->nowait = nowait;
7077 
7078 	ret = btrfs_lookup_file_extent(NULL, root, path,
7079 			btrfs_ino(BTRFS_I(inode)), offset, 0);
7080 	if (ret < 0)
7081 		goto out;
7082 
7083 	if (ret == 1) {
7084 		if (path->slots[0] == 0) {
7085 			/* can't find the item, must cow */
7086 			ret = 0;
7087 			goto out;
7088 		}
7089 		path->slots[0]--;
7090 	}
7091 	ret = 0;
7092 	leaf = path->nodes[0];
7093 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7094 	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7095 	    key.type != BTRFS_EXTENT_DATA_KEY) {
7096 		/* not our file or wrong item type, must cow */
7097 		goto out;
7098 	}
7099 
7100 	if (key.offset > offset) {
7101 		/* Wrong offset, must cow */
7102 		goto out;
7103 	}
7104 
7105 	if (btrfs_file_extent_end(path) <= offset)
7106 		goto out;
7107 
7108 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7109 	found_type = btrfs_file_extent_type(leaf, fi);
7110 	if (ram_bytes)
7111 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7112 
7113 	nocow_args.start = offset;
7114 	nocow_args.end = offset + *len - 1;
7115 	nocow_args.strict = strict;
7116 	nocow_args.free_path = true;
7117 
7118 	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
7119 	/* can_nocow_file_extent() has freed the path. */
7120 	path = NULL;
7121 
7122 	if (ret != 1) {
7123 		/* Treat errors as not being able to NOCOW. */
7124 		ret = 0;
7125 		goto out;
7126 	}
7127 
7128 	ret = 0;
7129 	if (btrfs_extent_readonly(fs_info, nocow_args.disk_bytenr))
7130 		goto out;
7131 
7132 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7133 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7134 		u64 range_end;
7135 
7136 		range_end = round_up(offset + nocow_args.num_bytes,
7137 				     root->fs_info->sectorsize) - 1;
7138 		ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
7139 		if (ret) {
7140 			ret = -EAGAIN;
7141 			goto out;
7142 		}
7143 	}
7144 
7145 	if (orig_start)
7146 		*orig_start = key.offset - nocow_args.extent_offset;
7147 	if (orig_block_len)
7148 		*orig_block_len = nocow_args.disk_num_bytes;
7149 
7150 	*len = nocow_args.num_bytes;
7151 	ret = 1;
7152 out:
7153 	btrfs_free_path(path);
7154 	return ret;
7155 }
7156 
7157 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7158 			      struct extent_state **cached_state,
7159 			      unsigned int iomap_flags)
7160 {
7161 	const bool writing = (iomap_flags & IOMAP_WRITE);
7162 	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7163 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7164 	struct btrfs_ordered_extent *ordered;
7165 	int ret = 0;
7166 
7167 	while (1) {
7168 		if (nowait) {
7169 			if (!try_lock_extent(io_tree, lockstart, lockend,
7170 					     cached_state))
7171 				return -EAGAIN;
7172 		} else {
7173 			lock_extent(io_tree, lockstart, lockend, cached_state);
7174 		}
7175 		/*
7176 		 * We're concerned with the entire range that we're going to be
7177 		 * doing DIO to, so we need to make sure there's no ordered
7178 		 * extents in this range.
7179 		 */
7180 		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
7181 						     lockend - lockstart + 1);
7182 
7183 		/*
7184 		 * We need to make sure there are no buffered pages in this
7185 		 * range either, we could have raced between the invalidate in
7186 		 * generic_file_direct_write and locking the extent.  The
7187 		 * invalidate needs to happen so that reads after a write do not
7188 		 * get stale data.
7189 		 */
7190 		if (!ordered &&
7191 		    (!writing || !filemap_range_has_page(inode->i_mapping,
7192 							 lockstart, lockend)))
7193 			break;
7194 
7195 		unlock_extent(io_tree, lockstart, lockend, cached_state);
7196 
7197 		if (ordered) {
7198 			if (nowait) {
7199 				btrfs_put_ordered_extent(ordered);
7200 				ret = -EAGAIN;
7201 				break;
7202 			}
7203 			/*
7204 			 * If we are doing a DIO read and the ordered extent we
7205 			 * found is for a buffered write, we can not wait for it
7206 			 * to complete and retry, because if we do so we can
7207 			 * deadlock with concurrent buffered writes on page
7208 			 * locks. This happens only if our DIO read covers more
7209 			 * than one extent map, if at this point has already
7210 			 * created an ordered extent for a previous extent map
7211 			 * and locked its range in the inode's io tree, and a
7212 			 * concurrent write against that previous extent map's
7213 			 * range and this range started (we unlock the ranges
7214 			 * in the io tree only when the bios complete and
7215 			 * buffered writes always lock pages before attempting
7216 			 * to lock range in the io tree).
7217 			 */
7218 			if (writing ||
7219 			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7220 				btrfs_start_ordered_extent(ordered);
7221 			else
7222 				ret = nowait ? -EAGAIN : -ENOTBLK;
7223 			btrfs_put_ordered_extent(ordered);
7224 		} else {
7225 			/*
7226 			 * We could trigger writeback for this range (and wait
7227 			 * for it to complete) and then invalidate the pages for
7228 			 * this range (through invalidate_inode_pages2_range()),
7229 			 * but that can lead us to a deadlock with a concurrent
7230 			 * call to readahead (a buffered read or a defrag call
7231 			 * triggered a readahead) on a page lock due to an
7232 			 * ordered dio extent we created before but did not have
7233 			 * yet a corresponding bio submitted (whence it can not
7234 			 * complete), which makes readahead wait for that
7235 			 * ordered extent to complete while holding a lock on
7236 			 * that page.
7237 			 */
7238 			ret = nowait ? -EAGAIN : -ENOTBLK;
7239 		}
7240 
7241 		if (ret)
7242 			break;
7243 
7244 		cond_resched();
7245 	}
7246 
7247 	return ret;
7248 }
7249 
7250 /* The callers of this must take lock_extent() */
7251 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
7252 				       u64 len, u64 orig_start, u64 block_start,
7253 				       u64 block_len, u64 orig_block_len,
7254 				       u64 ram_bytes, int compress_type,
7255 				       int type)
7256 {
7257 	struct extent_map *em;
7258 	int ret;
7259 
7260 	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7261 	       type == BTRFS_ORDERED_COMPRESSED ||
7262 	       type == BTRFS_ORDERED_NOCOW ||
7263 	       type == BTRFS_ORDERED_REGULAR);
7264 
7265 	em = alloc_extent_map();
7266 	if (!em)
7267 		return ERR_PTR(-ENOMEM);
7268 
7269 	em->start = start;
7270 	em->orig_start = orig_start;
7271 	em->len = len;
7272 	em->block_len = block_len;
7273 	em->block_start = block_start;
7274 	em->orig_block_len = orig_block_len;
7275 	em->ram_bytes = ram_bytes;
7276 	em->generation = -1;
7277 	em->flags |= EXTENT_FLAG_PINNED;
7278 	if (type == BTRFS_ORDERED_PREALLOC)
7279 		em->flags |= EXTENT_FLAG_FILLING;
7280 	else if (type == BTRFS_ORDERED_COMPRESSED)
7281 		extent_map_set_compression(em, compress_type);
7282 
7283 	ret = btrfs_replace_extent_map_range(inode, em, true);
7284 	if (ret) {
7285 		free_extent_map(em);
7286 		return ERR_PTR(ret);
7287 	}
7288 
7289 	/* em got 2 refs now, callers needs to do free_extent_map once. */
7290 	return em;
7291 }
7292 
7293 
7294 static int btrfs_get_blocks_direct_write(struct extent_map **map,
7295 					 struct inode *inode,
7296 					 struct btrfs_dio_data *dio_data,
7297 					 u64 start, u64 *lenp,
7298 					 unsigned int iomap_flags)
7299 {
7300 	const bool nowait = (iomap_flags & IOMAP_NOWAIT);
7301 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7302 	struct extent_map *em = *map;
7303 	int type;
7304 	u64 block_start, orig_start, orig_block_len, ram_bytes;
7305 	struct btrfs_block_group *bg;
7306 	bool can_nocow = false;
7307 	bool space_reserved = false;
7308 	u64 len = *lenp;
7309 	u64 prev_len;
7310 	int ret = 0;
7311 
7312 	/*
7313 	 * We don't allocate a new extent in the following cases
7314 	 *
7315 	 * 1) The inode is marked as NODATACOW. In this case we'll just use the
7316 	 * existing extent.
7317 	 * 2) The extent is marked as PREALLOC. We're good to go here and can
7318 	 * just use the extent.
7319 	 *
7320 	 */
7321 	if ((em->flags & EXTENT_FLAG_PREALLOC) ||
7322 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7323 	     em->block_start != EXTENT_MAP_HOLE)) {
7324 		if (em->flags & EXTENT_FLAG_PREALLOC)
7325 			type = BTRFS_ORDERED_PREALLOC;
7326 		else
7327 			type = BTRFS_ORDERED_NOCOW;
7328 		len = min(len, em->len - (start - em->start));
7329 		block_start = em->block_start + (start - em->start);
7330 
7331 		if (can_nocow_extent(inode, start, &len, &orig_start,
7332 				     &orig_block_len, &ram_bytes, false, false) == 1) {
7333 			bg = btrfs_inc_nocow_writers(fs_info, block_start);
7334 			if (bg)
7335 				can_nocow = true;
7336 		}
7337 	}
7338 
7339 	prev_len = len;
7340 	if (can_nocow) {
7341 		struct extent_map *em2;
7342 
7343 		/* We can NOCOW, so only need to reserve metadata space. */
7344 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7345 						      nowait);
7346 		if (ret < 0) {
7347 			/* Our caller expects us to free the input extent map. */
7348 			free_extent_map(em);
7349 			*map = NULL;
7350 			btrfs_dec_nocow_writers(bg);
7351 			if (nowait && (ret == -ENOSPC || ret == -EDQUOT))
7352 				ret = -EAGAIN;
7353 			goto out;
7354 		}
7355 		space_reserved = true;
7356 
7357 		em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, len,
7358 					      orig_start, block_start,
7359 					      len, orig_block_len,
7360 					      ram_bytes, type);
7361 		btrfs_dec_nocow_writers(bg);
7362 		if (type == BTRFS_ORDERED_PREALLOC) {
7363 			free_extent_map(em);
7364 			*map = em2;
7365 			em = em2;
7366 		}
7367 
7368 		if (IS_ERR(em2)) {
7369 			ret = PTR_ERR(em2);
7370 			goto out;
7371 		}
7372 
7373 		dio_data->nocow_done = true;
7374 	} else {
7375 		/* Our caller expects us to free the input extent map. */
7376 		free_extent_map(em);
7377 		*map = NULL;
7378 
7379 		if (nowait) {
7380 			ret = -EAGAIN;
7381 			goto out;
7382 		}
7383 
7384 		/*
7385 		 * If we could not allocate data space before locking the file
7386 		 * range and we can't do a NOCOW write, then we have to fail.
7387 		 */
7388 		if (!dio_data->data_space_reserved) {
7389 			ret = -ENOSPC;
7390 			goto out;
7391 		}
7392 
7393 		/*
7394 		 * We have to COW and we have already reserved data space before,
7395 		 * so now we reserve only metadata.
7396 		 */
7397 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len,
7398 						      false);
7399 		if (ret < 0)
7400 			goto out;
7401 		space_reserved = true;
7402 
7403 		em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len);
7404 		if (IS_ERR(em)) {
7405 			ret = PTR_ERR(em);
7406 			goto out;
7407 		}
7408 		*map = em;
7409 		len = min(len, em->len - (start - em->start));
7410 		if (len < prev_len)
7411 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
7412 							prev_len - len, true);
7413 	}
7414 
7415 	/*
7416 	 * We have created our ordered extent, so we can now release our reservation
7417 	 * for an outstanding extent.
7418 	 */
7419 	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
7420 
7421 	/*
7422 	 * Need to update the i_size under the extent lock so buffered
7423 	 * readers will get the updated i_size when we unlock.
7424 	 */
7425 	if (start + len > i_size_read(inode))
7426 		i_size_write(inode, start + len);
7427 out:
7428 	if (ret && space_reserved) {
7429 		btrfs_delalloc_release_extents(BTRFS_I(inode), len);
7430 		btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true);
7431 	}
7432 	*lenp = len;
7433 	return ret;
7434 }
7435 
7436 static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
7437 		loff_t length, unsigned int flags, struct iomap *iomap,
7438 		struct iomap *srcmap)
7439 {
7440 	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7441 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
7442 	struct extent_map *em;
7443 	struct extent_state *cached_state = NULL;
7444 	struct btrfs_dio_data *dio_data = iter->private;
7445 	u64 lockstart, lockend;
7446 	const bool write = !!(flags & IOMAP_WRITE);
7447 	int ret = 0;
7448 	u64 len = length;
7449 	const u64 data_alloc_len = length;
7450 	bool unlock_extents = false;
7451 
7452 	/*
7453 	 * We could potentially fault if we have a buffer > PAGE_SIZE, and if
7454 	 * we're NOWAIT we may submit a bio for a partial range and return
7455 	 * EIOCBQUEUED, which would result in an errant short read.
7456 	 *
7457 	 * The best way to handle this would be to allow for partial completions
7458 	 * of iocb's, so we could submit the partial bio, return and fault in
7459 	 * the rest of the pages, and then submit the io for the rest of the
7460 	 * range.  However we don't have that currently, so simply return
7461 	 * -EAGAIN at this point so that the normal path is used.
7462 	 */
7463 	if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE)
7464 		return -EAGAIN;
7465 
7466 	/*
7467 	 * Cap the size of reads to that usually seen in buffered I/O as we need
7468 	 * to allocate a contiguous array for the checksums.
7469 	 */
7470 	if (!write)
7471 		len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS);
7472 
7473 	lockstart = start;
7474 	lockend = start + len - 1;
7475 
7476 	/*
7477 	 * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't
7478 	 * enough if we've written compressed pages to this area, so we need to
7479 	 * flush the dirty pages again to make absolutely sure that any
7480 	 * outstanding dirty pages are on disk - the first flush only starts
7481 	 * compression on the data, while keeping the pages locked, so by the
7482 	 * time the second flush returns we know bios for the compressed pages
7483 	 * were submitted and finished, and the pages no longer under writeback.
7484 	 *
7485 	 * If we have a NOWAIT request and we have any pages in the range that
7486 	 * are locked, likely due to compression still in progress, we don't want
7487 	 * to block on page locks. We also don't want to block on pages marked as
7488 	 * dirty or under writeback (same as for the non-compression case).
7489 	 * iomap_dio_rw() did the same check, but after that and before we got
7490 	 * here, mmap'ed writes may have happened or buffered reads started
7491 	 * (readpage() and readahead(), which lock pages), as we haven't locked
7492 	 * the file range yet.
7493 	 */
7494 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
7495 		     &BTRFS_I(inode)->runtime_flags)) {
7496 		if (flags & IOMAP_NOWAIT) {
7497 			if (filemap_range_needs_writeback(inode->i_mapping,
7498 							  lockstart, lockend))
7499 				return -EAGAIN;
7500 		} else {
7501 			ret = filemap_fdatawrite_range(inode->i_mapping, start,
7502 						       start + length - 1);
7503 			if (ret)
7504 				return ret;
7505 		}
7506 	}
7507 
7508 	memset(dio_data, 0, sizeof(*dio_data));
7509 
7510 	/*
7511 	 * We always try to allocate data space and must do it before locking
7512 	 * the file range, to avoid deadlocks with concurrent writes to the same
7513 	 * range if the range has several extents and the writes don't expand the
7514 	 * current i_size (the inode lock is taken in shared mode). If we fail to
7515 	 * allocate data space here we continue and later, after locking the
7516 	 * file range, we fail with ENOSPC only if we figure out we can not do a
7517 	 * NOCOW write.
7518 	 */
7519 	if (write && !(flags & IOMAP_NOWAIT)) {
7520 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
7521 						  &dio_data->data_reserved,
7522 						  start, data_alloc_len, false);
7523 		if (!ret)
7524 			dio_data->data_space_reserved = true;
7525 		else if (ret && !(BTRFS_I(inode)->flags &
7526 				  (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
7527 			goto err;
7528 	}
7529 
7530 	/*
7531 	 * If this errors out it's because we couldn't invalidate pagecache for
7532 	 * this range and we need to fallback to buffered IO, or we are doing a
7533 	 * NOWAIT read/write and we need to block.
7534 	 */
7535 	ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags);
7536 	if (ret < 0)
7537 		goto err;
7538 
7539 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
7540 	if (IS_ERR(em)) {
7541 		ret = PTR_ERR(em);
7542 		goto unlock_err;
7543 	}
7544 
7545 	/*
7546 	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
7547 	 * io.  INLINE is special, and we could probably kludge it in here, but
7548 	 * it's still buffered so for safety lets just fall back to the generic
7549 	 * buffered path.
7550 	 *
7551 	 * For COMPRESSED we _have_ to read the entire extent in so we can
7552 	 * decompress it, so there will be buffering required no matter what we
7553 	 * do, so go ahead and fallback to buffered.
7554 	 *
7555 	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
7556 	 * to buffered IO.  Don't blame me, this is the price we pay for using
7557 	 * the generic code.
7558 	 */
7559 	if (extent_map_is_compressed(em) ||
7560 	    em->block_start == EXTENT_MAP_INLINE) {
7561 		free_extent_map(em);
7562 		/*
7563 		 * If we are in a NOWAIT context, return -EAGAIN in order to
7564 		 * fallback to buffered IO. This is not only because we can
7565 		 * block with buffered IO (no support for NOWAIT semantics at
7566 		 * the moment) but also to avoid returning short reads to user
7567 		 * space - this happens if we were able to read some data from
7568 		 * previous non-compressed extents and then when we fallback to
7569 		 * buffered IO, at btrfs_file_read_iter() by calling
7570 		 * filemap_read(), we fail to fault in pages for the read buffer,
7571 		 * in which case filemap_read() returns a short read (the number
7572 		 * of bytes previously read is > 0, so it does not return -EFAULT).
7573 		 */
7574 		ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK;
7575 		goto unlock_err;
7576 	}
7577 
7578 	len = min(len, em->len - (start - em->start));
7579 
7580 	/*
7581 	 * If we have a NOWAIT request and the range contains multiple extents
7582 	 * (or a mix of extents and holes), then we return -EAGAIN to make the
7583 	 * caller fallback to a context where it can do a blocking (without
7584 	 * NOWAIT) request. This way we avoid doing partial IO and returning
7585 	 * success to the caller, which is not optimal for writes and for reads
7586 	 * it can result in unexpected behaviour for an application.
7587 	 *
7588 	 * When doing a read, because we use IOMAP_DIO_PARTIAL when calling
7589 	 * iomap_dio_rw(), we can end up returning less data then what the caller
7590 	 * asked for, resulting in an unexpected, and incorrect, short read.
7591 	 * That is, the caller asked to read N bytes and we return less than that,
7592 	 * which is wrong unless we are crossing EOF. This happens if we get a
7593 	 * page fault error when trying to fault in pages for the buffer that is
7594 	 * associated to the struct iov_iter passed to iomap_dio_rw(), and we
7595 	 * have previously submitted bios for other extents in the range, in
7596 	 * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of
7597 	 * those bios have completed by the time we get the page fault error,
7598 	 * which we return back to our caller - we should only return EIOCBQUEUED
7599 	 * after we have submitted bios for all the extents in the range.
7600 	 */
7601 	if ((flags & IOMAP_NOWAIT) && len < length) {
7602 		free_extent_map(em);
7603 		ret = -EAGAIN;
7604 		goto unlock_err;
7605 	}
7606 
7607 	if (write) {
7608 		ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
7609 						    start, &len, flags);
7610 		if (ret < 0)
7611 			goto unlock_err;
7612 		unlock_extents = true;
7613 		/* Recalc len in case the new em is smaller than requested */
7614 		len = min(len, em->len - (start - em->start));
7615 		if (dio_data->data_space_reserved) {
7616 			u64 release_offset;
7617 			u64 release_len = 0;
7618 
7619 			if (dio_data->nocow_done) {
7620 				release_offset = start;
7621 				release_len = data_alloc_len;
7622 			} else if (len < data_alloc_len) {
7623 				release_offset = start + len;
7624 				release_len = data_alloc_len - len;
7625 			}
7626 
7627 			if (release_len > 0)
7628 				btrfs_free_reserved_data_space(BTRFS_I(inode),
7629 							       dio_data->data_reserved,
7630 							       release_offset,
7631 							       release_len);
7632 		}
7633 	} else {
7634 		/*
7635 		 * We need to unlock only the end area that we aren't using.
7636 		 * The rest is going to be unlocked by the endio routine.
7637 		 */
7638 		lockstart = start + len;
7639 		if (lockstart < lockend)
7640 			unlock_extents = true;
7641 	}
7642 
7643 	if (unlock_extents)
7644 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7645 			      &cached_state);
7646 	else
7647 		free_extent_state(cached_state);
7648 
7649 	/*
7650 	 * Translate extent map information to iomap.
7651 	 * We trim the extents (and move the addr) even though iomap code does
7652 	 * that, since we have locked only the parts we are performing I/O in.
7653 	 */
7654 	if ((em->block_start == EXTENT_MAP_HOLE) ||
7655 	    ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) {
7656 		iomap->addr = IOMAP_NULL_ADDR;
7657 		iomap->type = IOMAP_HOLE;
7658 	} else {
7659 		iomap->addr = em->block_start + (start - em->start);
7660 		iomap->type = IOMAP_MAPPED;
7661 	}
7662 	iomap->offset = start;
7663 	iomap->bdev = fs_info->fs_devices->latest_dev->bdev;
7664 	iomap->length = len;
7665 	free_extent_map(em);
7666 
7667 	return 0;
7668 
7669 unlock_err:
7670 	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7671 		      &cached_state);
7672 err:
7673 	if (dio_data->data_space_reserved) {
7674 		btrfs_free_reserved_data_space(BTRFS_I(inode),
7675 					       dio_data->data_reserved,
7676 					       start, data_alloc_len);
7677 		extent_changeset_free(dio_data->data_reserved);
7678 	}
7679 
7680 	return ret;
7681 }
7682 
7683 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
7684 		ssize_t written, unsigned int flags, struct iomap *iomap)
7685 {
7686 	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
7687 	struct btrfs_dio_data *dio_data = iter->private;
7688 	size_t submitted = dio_data->submitted;
7689 	const bool write = !!(flags & IOMAP_WRITE);
7690 	int ret = 0;
7691 
7692 	if (!write && (iomap->type == IOMAP_HOLE)) {
7693 		/* If reading from a hole, unlock and return */
7694 		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1,
7695 			      NULL);
7696 		return 0;
7697 	}
7698 
7699 	if (submitted < length) {
7700 		pos += submitted;
7701 		length -= submitted;
7702 		if (write)
7703 			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
7704 						    pos, length, false);
7705 		else
7706 			unlock_extent(&BTRFS_I(inode)->io_tree, pos,
7707 				      pos + length - 1, NULL);
7708 		ret = -ENOTBLK;
7709 	}
7710 	if (write) {
7711 		btrfs_put_ordered_extent(dio_data->ordered);
7712 		dio_data->ordered = NULL;
7713 	}
7714 
7715 	if (write)
7716 		extent_changeset_free(dio_data->data_reserved);
7717 	return ret;
7718 }
7719 
7720 static void btrfs_dio_end_io(struct btrfs_bio *bbio)
7721 {
7722 	struct btrfs_dio_private *dip =
7723 		container_of(bbio, struct btrfs_dio_private, bbio);
7724 	struct btrfs_inode *inode = bbio->inode;
7725 	struct bio *bio = &bbio->bio;
7726 
7727 	if (bio->bi_status) {
7728 		btrfs_warn(inode->root->fs_info,
7729 		"direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d",
7730 			   btrfs_ino(inode), bio->bi_opf,
7731 			   dip->file_offset, dip->bytes, bio->bi_status);
7732 	}
7733 
7734 	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
7735 		btrfs_finish_ordered_extent(bbio->ordered, NULL,
7736 					    dip->file_offset, dip->bytes,
7737 					    !bio->bi_status);
7738 	} else {
7739 		unlock_extent(&inode->io_tree, dip->file_offset,
7740 			      dip->file_offset + dip->bytes - 1, NULL);
7741 	}
7742 
7743 	bbio->bio.bi_private = bbio->private;
7744 	iomap_dio_bio_end_io(bio);
7745 }
7746 
7747 static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
7748 				loff_t file_offset)
7749 {
7750 	struct btrfs_bio *bbio = btrfs_bio(bio);
7751 	struct btrfs_dio_private *dip =
7752 		container_of(bbio, struct btrfs_dio_private, bbio);
7753 	struct btrfs_dio_data *dio_data = iter->private;
7754 
7755 	btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info,
7756 		       btrfs_dio_end_io, bio->bi_private);
7757 	bbio->inode = BTRFS_I(iter->inode);
7758 	bbio->file_offset = file_offset;
7759 
7760 	dip->file_offset = file_offset;
7761 	dip->bytes = bio->bi_iter.bi_size;
7762 
7763 	dio_data->submitted += bio->bi_iter.bi_size;
7764 
7765 	/*
7766 	 * Check if we are doing a partial write.  If we are, we need to split
7767 	 * the ordered extent to match the submitted bio.  Hang on to the
7768 	 * remaining unfinishable ordered_extent in dio_data so that it can be
7769 	 * cancelled in iomap_end to avoid a deadlock wherein faulting the
7770 	 * remaining pages is blocked on the outstanding ordered extent.
7771 	 */
7772 	if (iter->flags & IOMAP_WRITE) {
7773 		int ret;
7774 
7775 		ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered);
7776 		if (ret) {
7777 			btrfs_finish_ordered_extent(dio_data->ordered, NULL,
7778 						    file_offset, dip->bytes,
7779 						    !ret);
7780 			bio->bi_status = errno_to_blk_status(ret);
7781 			iomap_dio_bio_end_io(bio);
7782 			return;
7783 		}
7784 	}
7785 
7786 	btrfs_submit_bio(bbio, 0);
7787 }
7788 
7789 static const struct iomap_ops btrfs_dio_iomap_ops = {
7790 	.iomap_begin            = btrfs_dio_iomap_begin,
7791 	.iomap_end              = btrfs_dio_iomap_end,
7792 };
7793 
7794 static const struct iomap_dio_ops btrfs_dio_ops = {
7795 	.submit_io		= btrfs_dio_submit_io,
7796 	.bio_set		= &btrfs_dio_bioset,
7797 };
7798 
7799 ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
7800 {
7801 	struct btrfs_dio_data data = { 0 };
7802 
7803 	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
7804 			    IOMAP_DIO_PARTIAL, &data, done_before);
7805 }
7806 
7807 struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
7808 				  size_t done_before)
7809 {
7810 	struct btrfs_dio_data data = { 0 };
7811 
7812 	return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
7813 			    IOMAP_DIO_PARTIAL, &data, done_before);
7814 }
7815 
7816 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
7817 			u64 start, u64 len)
7818 {
7819 	int	ret;
7820 
7821 	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
7822 	if (ret)
7823 		return ret;
7824 
7825 	/*
7826 	 * fiemap_prep() called filemap_write_and_wait() for the whole possible
7827 	 * file range (0 to LLONG_MAX), but that is not enough if we have
7828 	 * compression enabled. The first filemap_fdatawrite_range() only kicks
7829 	 * in the compression of data (in an async thread) and will return
7830 	 * before the compression is done and writeback is started. A second
7831 	 * filemap_fdatawrite_range() is needed to wait for the compression to
7832 	 * complete and writeback to start. We also need to wait for ordered
7833 	 * extents to complete, because our fiemap implementation uses mainly
7834 	 * file extent items to list the extents, searching for extent maps
7835 	 * only for file ranges with holes or prealloc extents to figure out
7836 	 * if we have delalloc in those ranges.
7837 	 */
7838 	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
7839 		ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
7840 		if (ret)
7841 			return ret;
7842 	}
7843 
7844 	return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
7845 }
7846 
7847 static int btrfs_writepages(struct address_space *mapping,
7848 			    struct writeback_control *wbc)
7849 {
7850 	return extent_writepages(mapping, wbc);
7851 }
7852 
7853 static void btrfs_readahead(struct readahead_control *rac)
7854 {
7855 	extent_readahead(rac);
7856 }
7857 
7858 /*
7859  * For release_folio() and invalidate_folio() we have a race window where
7860  * folio_end_writeback() is called but the subpage spinlock is not yet released.
7861  * If we continue to release/invalidate the page, we could cause use-after-free
7862  * for subpage spinlock.  So this function is to spin and wait for subpage
7863  * spinlock.
7864  */
7865 static void wait_subpage_spinlock(struct page *page)
7866 {
7867 	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
7868 	struct folio *folio = page_folio(page);
7869 	struct btrfs_subpage *subpage;
7870 
7871 	if (!btrfs_is_subpage(fs_info, page->mapping))
7872 		return;
7873 
7874 	ASSERT(folio_test_private(folio) && folio_get_private(folio));
7875 	subpage = folio_get_private(folio);
7876 
7877 	/*
7878 	 * This may look insane as we just acquire the spinlock and release it,
7879 	 * without doing anything.  But we just want to make sure no one is
7880 	 * still holding the subpage spinlock.
7881 	 * And since the page is not dirty nor writeback, and we have page
7882 	 * locked, the only possible way to hold a spinlock is from the endio
7883 	 * function to clear page writeback.
7884 	 *
7885 	 * Here we just acquire the spinlock so that all existing callers
7886 	 * should exit and we're safe to release/invalidate the page.
7887 	 */
7888 	spin_lock_irq(&subpage->lock);
7889 	spin_unlock_irq(&subpage->lock);
7890 }
7891 
7892 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7893 {
7894 	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
7895 
7896 	if (ret == 1) {
7897 		wait_subpage_spinlock(&folio->page);
7898 		clear_page_extent_mapped(&folio->page);
7899 	}
7900 	return ret;
7901 }
7902 
7903 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7904 {
7905 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
7906 		return false;
7907 	return __btrfs_release_folio(folio, gfp_flags);
7908 }
7909 
7910 #ifdef CONFIG_MIGRATION
7911 static int btrfs_migrate_folio(struct address_space *mapping,
7912 			     struct folio *dst, struct folio *src,
7913 			     enum migrate_mode mode)
7914 {
7915 	int ret = filemap_migrate_folio(mapping, dst, src, mode);
7916 
7917 	if (ret != MIGRATEPAGE_SUCCESS)
7918 		return ret;
7919 
7920 	if (folio_test_ordered(src)) {
7921 		folio_clear_ordered(src);
7922 		folio_set_ordered(dst);
7923 	}
7924 
7925 	return MIGRATEPAGE_SUCCESS;
7926 }
7927 #else
7928 #define btrfs_migrate_folio NULL
7929 #endif
7930 
7931 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
7932 				 size_t length)
7933 {
7934 	struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
7935 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
7936 	struct extent_io_tree *tree = &inode->io_tree;
7937 	struct extent_state *cached_state = NULL;
7938 	u64 page_start = folio_pos(folio);
7939 	u64 page_end = page_start + folio_size(folio) - 1;
7940 	u64 cur;
7941 	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
7942 
7943 	/*
7944 	 * We have folio locked so no new ordered extent can be created on this
7945 	 * page, nor bio can be submitted for this folio.
7946 	 *
7947 	 * But already submitted bio can still be finished on this folio.
7948 	 * Furthermore, endio function won't skip folio which has Ordered
7949 	 * (Private2) already cleared, so it's possible for endio and
7950 	 * invalidate_folio to do the same ordered extent accounting twice
7951 	 * on one folio.
7952 	 *
7953 	 * So here we wait for any submitted bios to finish, so that we won't
7954 	 * do double ordered extent accounting on the same folio.
7955 	 */
7956 	folio_wait_writeback(folio);
7957 	wait_subpage_spinlock(&folio->page);
7958 
7959 	/*
7960 	 * For subpage case, we have call sites like
7961 	 * btrfs_punch_hole_lock_range() which passes range not aligned to
7962 	 * sectorsize.
7963 	 * If the range doesn't cover the full folio, we don't need to and
7964 	 * shouldn't clear page extent mapped, as folio->private can still
7965 	 * record subpage dirty bits for other part of the range.
7966 	 *
7967 	 * For cases that invalidate the full folio even the range doesn't
7968 	 * cover the full folio, like invalidating the last folio, we're
7969 	 * still safe to wait for ordered extent to finish.
7970 	 */
7971 	if (!(offset == 0 && length == folio_size(folio))) {
7972 		btrfs_release_folio(folio, GFP_NOFS);
7973 		return;
7974 	}
7975 
7976 	if (!inode_evicting)
7977 		lock_extent(tree, page_start, page_end, &cached_state);
7978 
7979 	cur = page_start;
7980 	while (cur < page_end) {
7981 		struct btrfs_ordered_extent *ordered;
7982 		u64 range_end;
7983 		u32 range_len;
7984 		u32 extra_flags = 0;
7985 
7986 		ordered = btrfs_lookup_first_ordered_range(inode, cur,
7987 							   page_end + 1 - cur);
7988 		if (!ordered) {
7989 			range_end = page_end;
7990 			/*
7991 			 * No ordered extent covering this range, we are safe
7992 			 * to delete all extent states in the range.
7993 			 */
7994 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7995 			goto next;
7996 		}
7997 		if (ordered->file_offset > cur) {
7998 			/*
7999 			 * There is a range between [cur, oe->file_offset) not
8000 			 * covered by any ordered extent.
8001 			 * We are safe to delete all extent states, and handle
8002 			 * the ordered extent in the next iteration.
8003 			 */
8004 			range_end = ordered->file_offset - 1;
8005 			extra_flags = EXTENT_CLEAR_ALL_BITS;
8006 			goto next;
8007 		}
8008 
8009 		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
8010 				page_end);
8011 		ASSERT(range_end + 1 - cur < U32_MAX);
8012 		range_len = range_end + 1 - cur;
8013 		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
8014 			/*
8015 			 * If Ordered (Private2) is cleared, it means endio has
8016 			 * already been executed for the range.
8017 			 * We can't delete the extent states as
8018 			 * btrfs_finish_ordered_io() may still use some of them.
8019 			 */
8020 			goto next;
8021 		}
8022 		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
8023 
8024 		/*
8025 		 * IO on this page will never be started, so we need to account
8026 		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
8027 		 * here, must leave that up for the ordered extent completion.
8028 		 *
8029 		 * This will also unlock the range for incoming
8030 		 * btrfs_finish_ordered_io().
8031 		 */
8032 		if (!inode_evicting)
8033 			clear_extent_bit(tree, cur, range_end,
8034 					 EXTENT_DELALLOC |
8035 					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8036 					 EXTENT_DEFRAG, &cached_state);
8037 
8038 		spin_lock_irq(&inode->ordered_tree_lock);
8039 		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8040 		ordered->truncated_len = min(ordered->truncated_len,
8041 					     cur - ordered->file_offset);
8042 		spin_unlock_irq(&inode->ordered_tree_lock);
8043 
8044 		/*
8045 		 * If the ordered extent has finished, we're safe to delete all
8046 		 * the extent states of the range, otherwise
8047 		 * btrfs_finish_ordered_io() will get executed by endio for
8048 		 * other pages, so we can't delete extent states.
8049 		 */
8050 		if (btrfs_dec_test_ordered_pending(inode, &ordered,
8051 						   cur, range_end + 1 - cur)) {
8052 			btrfs_finish_ordered_io(ordered);
8053 			/*
8054 			 * The ordered extent has finished, now we're again
8055 			 * safe to delete all extent states of the range.
8056 			 */
8057 			extra_flags = EXTENT_CLEAR_ALL_BITS;
8058 		}
8059 next:
8060 		if (ordered)
8061 			btrfs_put_ordered_extent(ordered);
8062 		/*
8063 		 * Qgroup reserved space handler
8064 		 * Sector(s) here will be either:
8065 		 *
8066 		 * 1) Already written to disk or bio already finished
8067 		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
8068 		 *    Qgroup will be handled by its qgroup_record then.
8069 		 *    btrfs_qgroup_free_data() call will do nothing here.
8070 		 *
8071 		 * 2) Not written to disk yet
8072 		 *    Then btrfs_qgroup_free_data() call will clear the
8073 		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
8074 		 *    reserved data space.
8075 		 *    Since the IO will never happen for this page.
8076 		 */
8077 		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
8078 		if (!inode_evicting) {
8079 			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
8080 				 EXTENT_DELALLOC | EXTENT_UPTODATE |
8081 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
8082 				 extra_flags, &cached_state);
8083 		}
8084 		cur = range_end + 1;
8085 	}
8086 	/*
8087 	 * We have iterated through all ordered extents of the page, the page
8088 	 * should not have Ordered (Private2) anymore, or the above iteration
8089 	 * did something wrong.
8090 	 */
8091 	ASSERT(!folio_test_ordered(folio));
8092 	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
8093 	if (!inode_evicting)
8094 		__btrfs_release_folio(folio, GFP_NOFS);
8095 	clear_page_extent_mapped(&folio->page);
8096 }
8097 
8098 /*
8099  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8100  * called from a page fault handler when a page is first dirtied. Hence we must
8101  * be careful to check for EOF conditions here. We set the page up correctly
8102  * for a written page which means we get ENOSPC checking when writing into
8103  * holes and correct delalloc and unwritten extent mapping on filesystems that
8104  * support these features.
8105  *
8106  * We are not allowed to take the i_mutex here so we have to play games to
8107  * protect against truncate races as the page could now be beyond EOF.  Because
8108  * truncate_setsize() writes the inode size before removing pages, once we have
8109  * the page lock we can determine safely if the page is beyond EOF. If it is not
8110  * beyond EOF, then the page is guaranteed safe against truncation until we
8111  * unlock the page.
8112  */
8113 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
8114 {
8115 	struct page *page = vmf->page;
8116 	struct folio *folio = page_folio(page);
8117 	struct inode *inode = file_inode(vmf->vma->vm_file);
8118 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
8119 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8120 	struct btrfs_ordered_extent *ordered;
8121 	struct extent_state *cached_state = NULL;
8122 	struct extent_changeset *data_reserved = NULL;
8123 	unsigned long zero_start;
8124 	loff_t size;
8125 	vm_fault_t ret;
8126 	int ret2;
8127 	int reserved = 0;
8128 	u64 reserved_space;
8129 	u64 page_start;
8130 	u64 page_end;
8131 	u64 end;
8132 
8133 	ASSERT(folio_order(folio) == 0);
8134 
8135 	reserved_space = PAGE_SIZE;
8136 
8137 	sb_start_pagefault(inode->i_sb);
8138 	page_start = page_offset(page);
8139 	page_end = page_start + PAGE_SIZE - 1;
8140 	end = page_end;
8141 
8142 	/*
8143 	 * Reserving delalloc space after obtaining the page lock can lead to
8144 	 * deadlock. For example, if a dirty page is locked by this function
8145 	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
8146 	 * dirty page write out, then the btrfs_writepages() function could
8147 	 * end up waiting indefinitely to get a lock on the page currently
8148 	 * being processed by btrfs_page_mkwrite() function.
8149 	 */
8150 	ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
8151 					    page_start, reserved_space);
8152 	if (!ret2) {
8153 		ret2 = file_update_time(vmf->vma->vm_file);
8154 		reserved = 1;
8155 	}
8156 	if (ret2) {
8157 		ret = vmf_error(ret2);
8158 		if (reserved)
8159 			goto out;
8160 		goto out_noreserve;
8161 	}
8162 
8163 	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8164 again:
8165 	down_read(&BTRFS_I(inode)->i_mmap_lock);
8166 	lock_page(page);
8167 	size = i_size_read(inode);
8168 
8169 	if ((page->mapping != inode->i_mapping) ||
8170 	    (page_start >= size)) {
8171 		/* page got truncated out from underneath us */
8172 		goto out_unlock;
8173 	}
8174 	wait_on_page_writeback(page);
8175 
8176 	lock_extent(io_tree, page_start, page_end, &cached_state);
8177 	ret2 = set_page_extent_mapped(page);
8178 	if (ret2 < 0) {
8179 		ret = vmf_error(ret2);
8180 		unlock_extent(io_tree, page_start, page_end, &cached_state);
8181 		goto out_unlock;
8182 	}
8183 
8184 	/*
8185 	 * we can't set the delalloc bits if there are pending ordered
8186 	 * extents.  Drop our locks and wait for them to finish
8187 	 */
8188 	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
8189 			PAGE_SIZE);
8190 	if (ordered) {
8191 		unlock_extent(io_tree, page_start, page_end, &cached_state);
8192 		unlock_page(page);
8193 		up_read(&BTRFS_I(inode)->i_mmap_lock);
8194 		btrfs_start_ordered_extent(ordered);
8195 		btrfs_put_ordered_extent(ordered);
8196 		goto again;
8197 	}
8198 
8199 	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8200 		reserved_space = round_up(size - page_start,
8201 					  fs_info->sectorsize);
8202 		if (reserved_space < PAGE_SIZE) {
8203 			end = page_start + reserved_space - 1;
8204 			btrfs_delalloc_release_space(BTRFS_I(inode),
8205 					data_reserved, page_start,
8206 					PAGE_SIZE - reserved_space, true);
8207 		}
8208 	}
8209 
8210 	/*
8211 	 * page_mkwrite gets called when the page is firstly dirtied after it's
8212 	 * faulted in, but write(2) could also dirty a page and set delalloc
8213 	 * bits, thus in this case for space account reason, we still need to
8214 	 * clear any delalloc bits within this page range since we have to
8215 	 * reserve data&meta space before lock_page() (see above comments).
8216 	 */
8217 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8218 			  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8219 			  EXTENT_DEFRAG, &cached_state);
8220 
8221 	ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8222 					&cached_state);
8223 	if (ret2) {
8224 		unlock_extent(io_tree, page_start, page_end, &cached_state);
8225 		ret = VM_FAULT_SIGBUS;
8226 		goto out_unlock;
8227 	}
8228 
8229 	/* page is wholly or partially inside EOF */
8230 	if (page_start + PAGE_SIZE > size)
8231 		zero_start = offset_in_page(size);
8232 	else
8233 		zero_start = PAGE_SIZE;
8234 
8235 	if (zero_start != PAGE_SIZE)
8236 		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8237 
8238 	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
8239 	btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
8240 	btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
8241 
8242 	btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
8243 
8244 	unlock_extent(io_tree, page_start, page_end, &cached_state);
8245 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8246 
8247 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8248 	sb_end_pagefault(inode->i_sb);
8249 	extent_changeset_free(data_reserved);
8250 	return VM_FAULT_LOCKED;
8251 
8252 out_unlock:
8253 	unlock_page(page);
8254 	up_read(&BTRFS_I(inode)->i_mmap_lock);
8255 out:
8256 	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
8257 	btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
8258 				     reserved_space, (ret != 0));
8259 out_noreserve:
8260 	sb_end_pagefault(inode->i_sb);
8261 	extent_changeset_free(data_reserved);
8262 	return ret;
8263 }
8264 
8265 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
8266 {
8267 	struct btrfs_truncate_control control = {
8268 		.inode = inode,
8269 		.ino = btrfs_ino(inode),
8270 		.min_type = BTRFS_EXTENT_DATA_KEY,
8271 		.clear_extent_range = true,
8272 	};
8273 	struct btrfs_root *root = inode->root;
8274 	struct btrfs_fs_info *fs_info = root->fs_info;
8275 	struct btrfs_block_rsv *rsv;
8276 	int ret;
8277 	struct btrfs_trans_handle *trans;
8278 	u64 mask = fs_info->sectorsize - 1;
8279 	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
8280 
8281 	if (!skip_writeback) {
8282 		ret = btrfs_wait_ordered_range(&inode->vfs_inode,
8283 					       inode->vfs_inode.i_size & (~mask),
8284 					       (u64)-1);
8285 		if (ret)
8286 			return ret;
8287 	}
8288 
8289 	/*
8290 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
8291 	 * things going on here:
8292 	 *
8293 	 * 1) We need to reserve space to update our inode.
8294 	 *
8295 	 * 2) We need to have something to cache all the space that is going to
8296 	 * be free'd up by the truncate operation, but also have some slack
8297 	 * space reserved in case it uses space during the truncate (thank you
8298 	 * very much snapshotting).
8299 	 *
8300 	 * And we need these to be separate.  The fact is we can use a lot of
8301 	 * space doing the truncate, and we have no earthly idea how much space
8302 	 * we will use, so we need the truncate reservation to be separate so it
8303 	 * doesn't end up using space reserved for updating the inode.  We also
8304 	 * need to be able to stop the transaction and start a new one, which
8305 	 * means we need to be able to update the inode several times, and we
8306 	 * have no idea of knowing how many times that will be, so we can't just
8307 	 * reserve 1 item for the entirety of the operation, so that has to be
8308 	 * done separately as well.
8309 	 *
8310 	 * So that leaves us with
8311 	 *
8312 	 * 1) rsv - for the truncate reservation, which we will steal from the
8313 	 * transaction reservation.
8314 	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
8315 	 * updating the inode.
8316 	 */
8317 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
8318 	if (!rsv)
8319 		return -ENOMEM;
8320 	rsv->size = min_size;
8321 	rsv->failfast = true;
8322 
8323 	/*
8324 	 * 1 for the truncate slack space
8325 	 * 1 for updating the inode.
8326 	 */
8327 	trans = btrfs_start_transaction(root, 2);
8328 	if (IS_ERR(trans)) {
8329 		ret = PTR_ERR(trans);
8330 		goto out;
8331 	}
8332 
8333 	/* Migrate the slack space for the truncate to our reserve */
8334 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
8335 				      min_size, false);
8336 	/*
8337 	 * We have reserved 2 metadata units when we started the transaction and
8338 	 * min_size matches 1 unit, so this should never fail, but if it does,
8339 	 * it's not critical we just fail truncation.
8340 	 */
8341 	if (WARN_ON(ret)) {
8342 		btrfs_end_transaction(trans);
8343 		goto out;
8344 	}
8345 
8346 	trans->block_rsv = rsv;
8347 
8348 	while (1) {
8349 		struct extent_state *cached_state = NULL;
8350 		const u64 new_size = inode->vfs_inode.i_size;
8351 		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
8352 
8353 		control.new_size = new_size;
8354 		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8355 		/*
8356 		 * We want to drop from the next block forward in case this new
8357 		 * size is not block aligned since we will be keeping the last
8358 		 * block of the extent just the way it is.
8359 		 */
8360 		btrfs_drop_extent_map_range(inode,
8361 					    ALIGN(new_size, fs_info->sectorsize),
8362 					    (u64)-1, false);
8363 
8364 		ret = btrfs_truncate_inode_items(trans, root, &control);
8365 
8366 		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
8367 		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
8368 
8369 		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
8370 
8371 		trans->block_rsv = &fs_info->trans_block_rsv;
8372 		if (ret != -ENOSPC && ret != -EAGAIN)
8373 			break;
8374 
8375 		ret = btrfs_update_inode(trans, inode);
8376 		if (ret)
8377 			break;
8378 
8379 		btrfs_end_transaction(trans);
8380 		btrfs_btree_balance_dirty(fs_info);
8381 
8382 		trans = btrfs_start_transaction(root, 2);
8383 		if (IS_ERR(trans)) {
8384 			ret = PTR_ERR(trans);
8385 			trans = NULL;
8386 			break;
8387 		}
8388 
8389 		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
8390 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
8391 					      rsv, min_size, false);
8392 		/*
8393 		 * We have reserved 2 metadata units when we started the
8394 		 * transaction and min_size matches 1 unit, so this should never
8395 		 * fail, but if it does, it's not critical we just fail truncation.
8396 		 */
8397 		if (WARN_ON(ret))
8398 			break;
8399 
8400 		trans->block_rsv = rsv;
8401 	}
8402 
8403 	/*
8404 	 * We can't call btrfs_truncate_block inside a trans handle as we could
8405 	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
8406 	 * know we've truncated everything except the last little bit, and can
8407 	 * do btrfs_truncate_block and then update the disk_i_size.
8408 	 */
8409 	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
8410 		btrfs_end_transaction(trans);
8411 		btrfs_btree_balance_dirty(fs_info);
8412 
8413 		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
8414 		if (ret)
8415 			goto out;
8416 		trans = btrfs_start_transaction(root, 1);
8417 		if (IS_ERR(trans)) {
8418 			ret = PTR_ERR(trans);
8419 			goto out;
8420 		}
8421 		btrfs_inode_safe_disk_i_size_write(inode, 0);
8422 	}
8423 
8424 	if (trans) {
8425 		int ret2;
8426 
8427 		trans->block_rsv = &fs_info->trans_block_rsv;
8428 		ret2 = btrfs_update_inode(trans, inode);
8429 		if (ret2 && !ret)
8430 			ret = ret2;
8431 
8432 		ret2 = btrfs_end_transaction(trans);
8433 		if (ret2 && !ret)
8434 			ret = ret2;
8435 		btrfs_btree_balance_dirty(fs_info);
8436 	}
8437 out:
8438 	btrfs_free_block_rsv(fs_info, rsv);
8439 	/*
8440 	 * So if we truncate and then write and fsync we normally would just
8441 	 * write the extents that changed, which is a problem if we need to
8442 	 * first truncate that entire inode.  So set this flag so we write out
8443 	 * all of the extents in the inode to the sync log so we're completely
8444 	 * safe.
8445 	 *
8446 	 * If no extents were dropped or trimmed we don't need to force the next
8447 	 * fsync to truncate all the inode's items from the log and re-log them
8448 	 * all. This means the truncate operation did not change the file size,
8449 	 * or changed it to a smaller size but there was only an implicit hole
8450 	 * between the old i_size and the new i_size, and there were no prealloc
8451 	 * extents beyond i_size to drop.
8452 	 */
8453 	if (control.extents_found > 0)
8454 		btrfs_set_inode_full_sync(inode);
8455 
8456 	return ret;
8457 }
8458 
8459 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
8460 				     struct inode *dir)
8461 {
8462 	struct inode *inode;
8463 
8464 	inode = new_inode(dir->i_sb);
8465 	if (inode) {
8466 		/*
8467 		 * Subvolumes don't inherit the sgid bit or the parent's gid if
8468 		 * the parent's sgid bit is set. This is probably a bug.
8469 		 */
8470 		inode_init_owner(idmap, inode, NULL,
8471 				 S_IFDIR | (~current_umask() & S_IRWXUGO));
8472 		inode->i_op = &btrfs_dir_inode_operations;
8473 		inode->i_fop = &btrfs_dir_file_operations;
8474 	}
8475 	return inode;
8476 }
8477 
8478 struct inode *btrfs_alloc_inode(struct super_block *sb)
8479 {
8480 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8481 	struct btrfs_inode *ei;
8482 	struct inode *inode;
8483 	struct extent_io_tree *file_extent_tree = NULL;
8484 
8485 	/* Self tests may pass a NULL fs_info. */
8486 	if (fs_info && !btrfs_fs_incompat(fs_info, NO_HOLES)) {
8487 		file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
8488 		if (!file_extent_tree)
8489 			return NULL;
8490 	}
8491 
8492 	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8493 	if (!ei) {
8494 		kfree(file_extent_tree);
8495 		return NULL;
8496 	}
8497 
8498 	ei->root = NULL;
8499 	ei->generation = 0;
8500 	ei->last_trans = 0;
8501 	ei->last_sub_trans = 0;
8502 	ei->logged_trans = 0;
8503 	ei->delalloc_bytes = 0;
8504 	ei->new_delalloc_bytes = 0;
8505 	ei->defrag_bytes = 0;
8506 	ei->disk_i_size = 0;
8507 	ei->flags = 0;
8508 	ei->ro_flags = 0;
8509 	ei->csum_bytes = 0;
8510 	ei->index_cnt = (u64)-1;
8511 	ei->dir_index = 0;
8512 	ei->last_unlink_trans = 0;
8513 	ei->last_reflink_trans = 0;
8514 	ei->last_log_commit = 0;
8515 
8516 	spin_lock_init(&ei->lock);
8517 	ei->outstanding_extents = 0;
8518 	if (sb->s_magic != BTRFS_TEST_MAGIC)
8519 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8520 					      BTRFS_BLOCK_RSV_DELALLOC);
8521 	ei->runtime_flags = 0;
8522 	ei->prop_compress = BTRFS_COMPRESS_NONE;
8523 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8524 
8525 	ei->delayed_node = NULL;
8526 
8527 	ei->i_otime_sec = 0;
8528 	ei->i_otime_nsec = 0;
8529 
8530 	inode = &ei->vfs_inode;
8531 	extent_map_tree_init(&ei->extent_tree);
8532 
8533 	/* This io tree sets the valid inode. */
8534 	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
8535 	ei->io_tree.inode = ei;
8536 
8537 	ei->file_extent_tree = file_extent_tree;
8538 	if (file_extent_tree) {
8539 		extent_io_tree_init(fs_info, ei->file_extent_tree,
8540 				    IO_TREE_INODE_FILE_EXTENT);
8541 		/* Lockdep class is set only for the file extent tree. */
8542 		lockdep_set_class(&ei->file_extent_tree->lock, &file_extent_tree_class);
8543 	}
8544 	mutex_init(&ei->log_mutex);
8545 	spin_lock_init(&ei->ordered_tree_lock);
8546 	ei->ordered_tree = RB_ROOT;
8547 	ei->ordered_tree_last = NULL;
8548 	INIT_LIST_HEAD(&ei->delalloc_inodes);
8549 	INIT_LIST_HEAD(&ei->delayed_iput);
8550 	RB_CLEAR_NODE(&ei->rb_node);
8551 	init_rwsem(&ei->i_mmap_lock);
8552 
8553 	return inode;
8554 }
8555 
8556 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8557 void btrfs_test_destroy_inode(struct inode *inode)
8558 {
8559 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8560 	kfree(BTRFS_I(inode)->file_extent_tree);
8561 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8562 }
8563 #endif
8564 
8565 void btrfs_free_inode(struct inode *inode)
8566 {
8567 	kfree(BTRFS_I(inode)->file_extent_tree);
8568 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8569 }
8570 
8571 void btrfs_destroy_inode(struct inode *vfs_inode)
8572 {
8573 	struct btrfs_ordered_extent *ordered;
8574 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8575 	struct btrfs_root *root = inode->root;
8576 	bool freespace_inode;
8577 
8578 	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8579 	WARN_ON(vfs_inode->i_data.nrpages);
8580 	WARN_ON(inode->block_rsv.reserved);
8581 	WARN_ON(inode->block_rsv.size);
8582 	WARN_ON(inode->outstanding_extents);
8583 	if (!S_ISDIR(vfs_inode->i_mode)) {
8584 		WARN_ON(inode->delalloc_bytes);
8585 		WARN_ON(inode->new_delalloc_bytes);
8586 	}
8587 	WARN_ON(inode->csum_bytes);
8588 	WARN_ON(inode->defrag_bytes);
8589 
8590 	/*
8591 	 * This can happen where we create an inode, but somebody else also
8592 	 * created the same inode and we need to destroy the one we already
8593 	 * created.
8594 	 */
8595 	if (!root)
8596 		return;
8597 
8598 	/*
8599 	 * If this is a free space inode do not take the ordered extents lockdep
8600 	 * map.
8601 	 */
8602 	freespace_inode = btrfs_is_free_space_inode(inode);
8603 
8604 	while (1) {
8605 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8606 		if (!ordered)
8607 			break;
8608 		else {
8609 			btrfs_err(root->fs_info,
8610 				  "found ordered extent %llu %llu on inode cleanup",
8611 				  ordered->file_offset, ordered->num_bytes);
8612 
8613 			if (!freespace_inode)
8614 				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8615 
8616 			btrfs_remove_ordered_extent(inode, ordered);
8617 			btrfs_put_ordered_extent(ordered);
8618 			btrfs_put_ordered_extent(ordered);
8619 		}
8620 	}
8621 	btrfs_qgroup_check_reserved_leak(inode);
8622 	inode_tree_del(inode);
8623 	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8624 	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8625 	btrfs_put_root(inode->root);
8626 }
8627 
8628 int btrfs_drop_inode(struct inode *inode)
8629 {
8630 	struct btrfs_root *root = BTRFS_I(inode)->root;
8631 
8632 	if (root == NULL)
8633 		return 1;
8634 
8635 	/* the snap/subvol tree is on deleting */
8636 	if (btrfs_root_refs(&root->root_item) == 0)
8637 		return 1;
8638 	else
8639 		return generic_drop_inode(inode);
8640 }
8641 
8642 static void init_once(void *foo)
8643 {
8644 	struct btrfs_inode *ei = foo;
8645 
8646 	inode_init_once(&ei->vfs_inode);
8647 }
8648 
8649 void __cold btrfs_destroy_cachep(void)
8650 {
8651 	/*
8652 	 * Make sure all delayed rcu free inodes are flushed before we
8653 	 * destroy cache.
8654 	 */
8655 	rcu_barrier();
8656 	bioset_exit(&btrfs_dio_bioset);
8657 	kmem_cache_destroy(btrfs_inode_cachep);
8658 }
8659 
8660 int __init btrfs_init_cachep(void)
8661 {
8662 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8663 			sizeof(struct btrfs_inode), 0,
8664 			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
8665 			init_once);
8666 	if (!btrfs_inode_cachep)
8667 		goto fail;
8668 
8669 	if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE,
8670 			offsetof(struct btrfs_dio_private, bbio.bio),
8671 			BIOSET_NEED_BVECS))
8672 		goto fail;
8673 
8674 	return 0;
8675 fail:
8676 	btrfs_destroy_cachep();
8677 	return -ENOMEM;
8678 }
8679 
8680 static int btrfs_getattr(struct mnt_idmap *idmap,
8681 			 const struct path *path, struct kstat *stat,
8682 			 u32 request_mask, unsigned int flags)
8683 {
8684 	u64 delalloc_bytes;
8685 	u64 inode_bytes;
8686 	struct inode *inode = d_inode(path->dentry);
8687 	u32 blocksize = inode->i_sb->s_blocksize;
8688 	u32 bi_flags = BTRFS_I(inode)->flags;
8689 	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8690 
8691 	stat->result_mask |= STATX_BTIME;
8692 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
8693 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
8694 	if (bi_flags & BTRFS_INODE_APPEND)
8695 		stat->attributes |= STATX_ATTR_APPEND;
8696 	if (bi_flags & BTRFS_INODE_COMPRESS)
8697 		stat->attributes |= STATX_ATTR_COMPRESSED;
8698 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
8699 		stat->attributes |= STATX_ATTR_IMMUTABLE;
8700 	if (bi_flags & BTRFS_INODE_NODUMP)
8701 		stat->attributes |= STATX_ATTR_NODUMP;
8702 	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8703 		stat->attributes |= STATX_ATTR_VERITY;
8704 
8705 	stat->attributes_mask |= (STATX_ATTR_APPEND |
8706 				  STATX_ATTR_COMPRESSED |
8707 				  STATX_ATTR_IMMUTABLE |
8708 				  STATX_ATTR_NODUMP);
8709 
8710 	generic_fillattr(idmap, request_mask, inode, stat);
8711 	stat->dev = BTRFS_I(inode)->root->anon_dev;
8712 
8713 	spin_lock(&BTRFS_I(inode)->lock);
8714 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8715 	inode_bytes = inode_get_bytes(inode);
8716 	spin_unlock(&BTRFS_I(inode)->lock);
8717 	stat->blocks = (ALIGN(inode_bytes, blocksize) +
8718 			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
8719 	return 0;
8720 }
8721 
8722 static int btrfs_rename_exchange(struct inode *old_dir,
8723 			      struct dentry *old_dentry,
8724 			      struct inode *new_dir,
8725 			      struct dentry *new_dentry)
8726 {
8727 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
8728 	struct btrfs_trans_handle *trans;
8729 	unsigned int trans_num_items;
8730 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8731 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8732 	struct inode *new_inode = new_dentry->d_inode;
8733 	struct inode *old_inode = old_dentry->d_inode;
8734 	struct btrfs_rename_ctx old_rename_ctx;
8735 	struct btrfs_rename_ctx new_rename_ctx;
8736 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8737 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8738 	u64 old_idx = 0;
8739 	u64 new_idx = 0;
8740 	int ret;
8741 	int ret2;
8742 	bool need_abort = false;
8743 	struct fscrypt_name old_fname, new_fname;
8744 	struct fscrypt_str *old_name, *new_name;
8745 
8746 	/*
8747 	 * For non-subvolumes allow exchange only within one subvolume, in the
8748 	 * same inode namespace. Two subvolumes (represented as directory) can
8749 	 * be exchanged as they're a logical link and have a fixed inode number.
8750 	 */
8751 	if (root != dest &&
8752 	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
8753 	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
8754 		return -EXDEV;
8755 
8756 	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8757 	if (ret)
8758 		return ret;
8759 
8760 	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8761 	if (ret) {
8762 		fscrypt_free_filename(&old_fname);
8763 		return ret;
8764 	}
8765 
8766 	old_name = &old_fname.disk_name;
8767 	new_name = &new_fname.disk_name;
8768 
8769 	/* close the race window with snapshot create/destroy ioctl */
8770 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8771 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
8772 		down_read(&fs_info->subvol_sem);
8773 
8774 	/*
8775 	 * For each inode:
8776 	 * 1 to remove old dir item
8777 	 * 1 to remove old dir index
8778 	 * 1 to add new dir item
8779 	 * 1 to add new dir index
8780 	 * 1 to update parent inode
8781 	 *
8782 	 * If the parents are the same, we only need to account for one
8783 	 */
8784 	trans_num_items = (old_dir == new_dir ? 9 : 10);
8785 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8786 		/*
8787 		 * 1 to remove old root ref
8788 		 * 1 to remove old root backref
8789 		 * 1 to add new root ref
8790 		 * 1 to add new root backref
8791 		 */
8792 		trans_num_items += 4;
8793 	} else {
8794 		/*
8795 		 * 1 to update inode item
8796 		 * 1 to remove old inode ref
8797 		 * 1 to add new inode ref
8798 		 */
8799 		trans_num_items += 3;
8800 	}
8801 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
8802 		trans_num_items += 4;
8803 	else
8804 		trans_num_items += 3;
8805 	trans = btrfs_start_transaction(root, trans_num_items);
8806 	if (IS_ERR(trans)) {
8807 		ret = PTR_ERR(trans);
8808 		goto out_notrans;
8809 	}
8810 
8811 	if (dest != root) {
8812 		ret = btrfs_record_root_in_trans(trans, dest);
8813 		if (ret)
8814 			goto out_fail;
8815 	}
8816 
8817 	/*
8818 	 * We need to find a free sequence number both in the source and
8819 	 * in the destination directory for the exchange.
8820 	 */
8821 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8822 	if (ret)
8823 		goto out_fail;
8824 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8825 	if (ret)
8826 		goto out_fail;
8827 
8828 	BTRFS_I(old_inode)->dir_index = 0ULL;
8829 	BTRFS_I(new_inode)->dir_index = 0ULL;
8830 
8831 	/* Reference for the source. */
8832 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8833 		/* force full log commit if subvolume involved. */
8834 		btrfs_set_log_full_commit(trans);
8835 	} else {
8836 		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8837 					     btrfs_ino(BTRFS_I(new_dir)),
8838 					     old_idx);
8839 		if (ret)
8840 			goto out_fail;
8841 		need_abort = true;
8842 	}
8843 
8844 	/* And now for the dest. */
8845 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8846 		/* force full log commit if subvolume involved. */
8847 		btrfs_set_log_full_commit(trans);
8848 	} else {
8849 		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8850 					     btrfs_ino(BTRFS_I(old_dir)),
8851 					     new_idx);
8852 		if (ret) {
8853 			if (need_abort)
8854 				btrfs_abort_transaction(trans, ret);
8855 			goto out_fail;
8856 		}
8857 	}
8858 
8859 	/* Update inode version and ctime/mtime. */
8860 	inode_inc_iversion(old_dir);
8861 	inode_inc_iversion(new_dir);
8862 	inode_inc_iversion(old_inode);
8863 	inode_inc_iversion(new_inode);
8864 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8865 
8866 	if (old_dentry->d_parent != new_dentry->d_parent) {
8867 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8868 					BTRFS_I(old_inode), true);
8869 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8870 					BTRFS_I(new_inode), true);
8871 	}
8872 
8873 	/* src is a subvolume */
8874 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8875 		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8876 	} else { /* src is an inode */
8877 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8878 					   BTRFS_I(old_dentry->d_inode),
8879 					   old_name, &old_rename_ctx);
8880 		if (!ret)
8881 			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8882 	}
8883 	if (ret) {
8884 		btrfs_abort_transaction(trans, ret);
8885 		goto out_fail;
8886 	}
8887 
8888 	/* dest is a subvolume */
8889 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8890 		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8891 	} else { /* dest is an inode */
8892 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8893 					   BTRFS_I(new_dentry->d_inode),
8894 					   new_name, &new_rename_ctx);
8895 		if (!ret)
8896 			ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8897 	}
8898 	if (ret) {
8899 		btrfs_abort_transaction(trans, ret);
8900 		goto out_fail;
8901 	}
8902 
8903 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8904 			     new_name, 0, old_idx);
8905 	if (ret) {
8906 		btrfs_abort_transaction(trans, ret);
8907 		goto out_fail;
8908 	}
8909 
8910 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8911 			     old_name, 0, new_idx);
8912 	if (ret) {
8913 		btrfs_abort_transaction(trans, ret);
8914 		goto out_fail;
8915 	}
8916 
8917 	if (old_inode->i_nlink == 1)
8918 		BTRFS_I(old_inode)->dir_index = old_idx;
8919 	if (new_inode->i_nlink == 1)
8920 		BTRFS_I(new_inode)->dir_index = new_idx;
8921 
8922 	/*
8923 	 * Now pin the logs of the roots. We do it to ensure that no other task
8924 	 * can sync the logs while we are in progress with the rename, because
8925 	 * that could result in an inconsistency in case any of the inodes that
8926 	 * are part of this rename operation were logged before.
8927 	 */
8928 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8929 		btrfs_pin_log_trans(root);
8930 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8931 		btrfs_pin_log_trans(dest);
8932 
8933 	/* Do the log updates for all inodes. */
8934 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8935 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8936 				   old_rename_ctx.index, new_dentry->d_parent);
8937 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8938 		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
8939 				   new_rename_ctx.index, old_dentry->d_parent);
8940 
8941 	/* Now unpin the logs. */
8942 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8943 		btrfs_end_log_trans(root);
8944 	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8945 		btrfs_end_log_trans(dest);
8946 out_fail:
8947 	ret2 = btrfs_end_transaction(trans);
8948 	ret = ret ? ret : ret2;
8949 out_notrans:
8950 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
8951 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
8952 		up_read(&fs_info->subvol_sem);
8953 
8954 	fscrypt_free_filename(&new_fname);
8955 	fscrypt_free_filename(&old_fname);
8956 	return ret;
8957 }
8958 
8959 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
8960 					struct inode *dir)
8961 {
8962 	struct inode *inode;
8963 
8964 	inode = new_inode(dir->i_sb);
8965 	if (inode) {
8966 		inode_init_owner(idmap, inode, dir,
8967 				 S_IFCHR | WHITEOUT_MODE);
8968 		inode->i_op = &btrfs_special_inode_operations;
8969 		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
8970 	}
8971 	return inode;
8972 }
8973 
8974 static int btrfs_rename(struct mnt_idmap *idmap,
8975 			struct inode *old_dir, struct dentry *old_dentry,
8976 			struct inode *new_dir, struct dentry *new_dentry,
8977 			unsigned int flags)
8978 {
8979 	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
8980 	struct btrfs_new_inode_args whiteout_args = {
8981 		.dir = old_dir,
8982 		.dentry = old_dentry,
8983 	};
8984 	struct btrfs_trans_handle *trans;
8985 	unsigned int trans_num_items;
8986 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8987 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8988 	struct inode *new_inode = d_inode(new_dentry);
8989 	struct inode *old_inode = d_inode(old_dentry);
8990 	struct btrfs_rename_ctx rename_ctx;
8991 	u64 index = 0;
8992 	int ret;
8993 	int ret2;
8994 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8995 	struct fscrypt_name old_fname, new_fname;
8996 
8997 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8998 		return -EPERM;
8999 
9000 	/* we only allow rename subvolume link between subvolumes */
9001 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9002 		return -EXDEV;
9003 
9004 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9005 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
9006 		return -ENOTEMPTY;
9007 
9008 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
9009 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9010 		return -ENOTEMPTY;
9011 
9012 	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
9013 	if (ret)
9014 		return ret;
9015 
9016 	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
9017 	if (ret) {
9018 		fscrypt_free_filename(&old_fname);
9019 		return ret;
9020 	}
9021 
9022 	/* check for collisions, even if the  name isn't there */
9023 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
9024 	if (ret) {
9025 		if (ret == -EEXIST) {
9026 			/* we shouldn't get
9027 			 * eexist without a new_inode */
9028 			if (WARN_ON(!new_inode)) {
9029 				goto out_fscrypt_names;
9030 			}
9031 		} else {
9032 			/* maybe -EOVERFLOW */
9033 			goto out_fscrypt_names;
9034 		}
9035 	}
9036 	ret = 0;
9037 
9038 	/*
9039 	 * we're using rename to replace one file with another.  Start IO on it
9040 	 * now so  we don't add too much work to the end of the transaction
9041 	 */
9042 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9043 		filemap_flush(old_inode->i_mapping);
9044 
9045 	if (flags & RENAME_WHITEOUT) {
9046 		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
9047 		if (!whiteout_args.inode) {
9048 			ret = -ENOMEM;
9049 			goto out_fscrypt_names;
9050 		}
9051 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
9052 		if (ret)
9053 			goto out_whiteout_inode;
9054 	} else {
9055 		/* 1 to update the old parent inode. */
9056 		trans_num_items = 1;
9057 	}
9058 
9059 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9060 		/* Close the race window with snapshot create/destroy ioctl */
9061 		down_read(&fs_info->subvol_sem);
9062 		/*
9063 		 * 1 to remove old root ref
9064 		 * 1 to remove old root backref
9065 		 * 1 to add new root ref
9066 		 * 1 to add new root backref
9067 		 */
9068 		trans_num_items += 4;
9069 	} else {
9070 		/*
9071 		 * 1 to update inode
9072 		 * 1 to remove old inode ref
9073 		 * 1 to add new inode ref
9074 		 */
9075 		trans_num_items += 3;
9076 	}
9077 	/*
9078 	 * 1 to remove old dir item
9079 	 * 1 to remove old dir index
9080 	 * 1 to add new dir item
9081 	 * 1 to add new dir index
9082 	 */
9083 	trans_num_items += 4;
9084 	/* 1 to update new parent inode if it's not the same as the old parent */
9085 	if (new_dir != old_dir)
9086 		trans_num_items++;
9087 	if (new_inode) {
9088 		/*
9089 		 * 1 to update inode
9090 		 * 1 to remove inode ref
9091 		 * 1 to remove dir item
9092 		 * 1 to remove dir index
9093 		 * 1 to possibly add orphan item
9094 		 */
9095 		trans_num_items += 5;
9096 	}
9097 	trans = btrfs_start_transaction(root, trans_num_items);
9098 	if (IS_ERR(trans)) {
9099 		ret = PTR_ERR(trans);
9100 		goto out_notrans;
9101 	}
9102 
9103 	if (dest != root) {
9104 		ret = btrfs_record_root_in_trans(trans, dest);
9105 		if (ret)
9106 			goto out_fail;
9107 	}
9108 
9109 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
9110 	if (ret)
9111 		goto out_fail;
9112 
9113 	BTRFS_I(old_inode)->dir_index = 0ULL;
9114 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9115 		/* force full log commit if subvolume involved. */
9116 		btrfs_set_log_full_commit(trans);
9117 	} else {
9118 		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
9119 					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
9120 					     index);
9121 		if (ret)
9122 			goto out_fail;
9123 	}
9124 
9125 	inode_inc_iversion(old_dir);
9126 	inode_inc_iversion(new_dir);
9127 	inode_inc_iversion(old_inode);
9128 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
9129 
9130 	if (old_dentry->d_parent != new_dentry->d_parent)
9131 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
9132 					BTRFS_I(old_inode), true);
9133 
9134 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9135 		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
9136 	} else {
9137 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
9138 					   BTRFS_I(d_inode(old_dentry)),
9139 					   &old_fname.disk_name, &rename_ctx);
9140 		if (!ret)
9141 			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
9142 	}
9143 	if (ret) {
9144 		btrfs_abort_transaction(trans, ret);
9145 		goto out_fail;
9146 	}
9147 
9148 	if (new_inode) {
9149 		inode_inc_iversion(new_inode);
9150 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
9151 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9152 			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
9153 			BUG_ON(new_inode->i_nlink == 0);
9154 		} else {
9155 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
9156 						 BTRFS_I(d_inode(new_dentry)),
9157 						 &new_fname.disk_name);
9158 		}
9159 		if (!ret && new_inode->i_nlink == 0)
9160 			ret = btrfs_orphan_add(trans,
9161 					BTRFS_I(d_inode(new_dentry)));
9162 		if (ret) {
9163 			btrfs_abort_transaction(trans, ret);
9164 			goto out_fail;
9165 		}
9166 	}
9167 
9168 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
9169 			     &new_fname.disk_name, 0, index);
9170 	if (ret) {
9171 		btrfs_abort_transaction(trans, ret);
9172 		goto out_fail;
9173 	}
9174 
9175 	if (old_inode->i_nlink == 1)
9176 		BTRFS_I(old_inode)->dir_index = index;
9177 
9178 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
9179 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
9180 				   rename_ctx.index, new_dentry->d_parent);
9181 
9182 	if (flags & RENAME_WHITEOUT) {
9183 		ret = btrfs_create_new_inode(trans, &whiteout_args);
9184 		if (ret) {
9185 			btrfs_abort_transaction(trans, ret);
9186 			goto out_fail;
9187 		} else {
9188 			unlock_new_inode(whiteout_args.inode);
9189 			iput(whiteout_args.inode);
9190 			whiteout_args.inode = NULL;
9191 		}
9192 	}
9193 out_fail:
9194 	ret2 = btrfs_end_transaction(trans);
9195 	ret = ret ? ret : ret2;
9196 out_notrans:
9197 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9198 		up_read(&fs_info->subvol_sem);
9199 	if (flags & RENAME_WHITEOUT)
9200 		btrfs_new_inode_args_destroy(&whiteout_args);
9201 out_whiteout_inode:
9202 	if (flags & RENAME_WHITEOUT)
9203 		iput(whiteout_args.inode);
9204 out_fscrypt_names:
9205 	fscrypt_free_filename(&old_fname);
9206 	fscrypt_free_filename(&new_fname);
9207 	return ret;
9208 }
9209 
9210 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
9211 			 struct dentry *old_dentry, struct inode *new_dir,
9212 			 struct dentry *new_dentry, unsigned int flags)
9213 {
9214 	int ret;
9215 
9216 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9217 		return -EINVAL;
9218 
9219 	if (flags & RENAME_EXCHANGE)
9220 		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9221 					    new_dentry);
9222 	else
9223 		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
9224 				   new_dentry, flags);
9225 
9226 	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
9227 
9228 	return ret;
9229 }
9230 
9231 struct btrfs_delalloc_work {
9232 	struct inode *inode;
9233 	struct completion completion;
9234 	struct list_head list;
9235 	struct btrfs_work work;
9236 };
9237 
9238 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9239 {
9240 	struct btrfs_delalloc_work *delalloc_work;
9241 	struct inode *inode;
9242 
9243 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
9244 				     work);
9245 	inode = delalloc_work->inode;
9246 	filemap_flush(inode->i_mapping);
9247 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9248 				&BTRFS_I(inode)->runtime_flags))
9249 		filemap_flush(inode->i_mapping);
9250 
9251 	iput(inode);
9252 	complete(&delalloc_work->completion);
9253 }
9254 
9255 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
9256 {
9257 	struct btrfs_delalloc_work *work;
9258 
9259 	work = kmalloc(sizeof(*work), GFP_NOFS);
9260 	if (!work)
9261 		return NULL;
9262 
9263 	init_completion(&work->completion);
9264 	INIT_LIST_HEAD(&work->list);
9265 	work->inode = inode;
9266 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
9267 
9268 	return work;
9269 }
9270 
9271 /*
9272  * some fairly slow code that needs optimization. This walks the list
9273  * of all the inodes with pending delalloc and forces them to disk.
9274  */
9275 static int start_delalloc_inodes(struct btrfs_root *root,
9276 				 struct writeback_control *wbc, bool snapshot,
9277 				 bool in_reclaim_context)
9278 {
9279 	struct btrfs_inode *binode;
9280 	struct inode *inode;
9281 	struct btrfs_delalloc_work *work, *next;
9282 	LIST_HEAD(works);
9283 	LIST_HEAD(splice);
9284 	int ret = 0;
9285 	bool full_flush = wbc->nr_to_write == LONG_MAX;
9286 
9287 	mutex_lock(&root->delalloc_mutex);
9288 	spin_lock(&root->delalloc_lock);
9289 	list_splice_init(&root->delalloc_inodes, &splice);
9290 	while (!list_empty(&splice)) {
9291 		binode = list_entry(splice.next, struct btrfs_inode,
9292 				    delalloc_inodes);
9293 
9294 		list_move_tail(&binode->delalloc_inodes,
9295 			       &root->delalloc_inodes);
9296 
9297 		if (in_reclaim_context &&
9298 		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
9299 			continue;
9300 
9301 		inode = igrab(&binode->vfs_inode);
9302 		if (!inode) {
9303 			cond_resched_lock(&root->delalloc_lock);
9304 			continue;
9305 		}
9306 		spin_unlock(&root->delalloc_lock);
9307 
9308 		if (snapshot)
9309 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
9310 				&binode->runtime_flags);
9311 		if (full_flush) {
9312 			work = btrfs_alloc_delalloc_work(inode);
9313 			if (!work) {
9314 				iput(inode);
9315 				ret = -ENOMEM;
9316 				goto out;
9317 			}
9318 			list_add_tail(&work->list, &works);
9319 			btrfs_queue_work(root->fs_info->flush_workers,
9320 					 &work->work);
9321 		} else {
9322 			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
9323 			btrfs_add_delayed_iput(BTRFS_I(inode));
9324 			if (ret || wbc->nr_to_write <= 0)
9325 				goto out;
9326 		}
9327 		cond_resched();
9328 		spin_lock(&root->delalloc_lock);
9329 	}
9330 	spin_unlock(&root->delalloc_lock);
9331 
9332 out:
9333 	list_for_each_entry_safe(work, next, &works, list) {
9334 		list_del_init(&work->list);
9335 		wait_for_completion(&work->completion);
9336 		kfree(work);
9337 	}
9338 
9339 	if (!list_empty(&splice)) {
9340 		spin_lock(&root->delalloc_lock);
9341 		list_splice_tail(&splice, &root->delalloc_inodes);
9342 		spin_unlock(&root->delalloc_lock);
9343 	}
9344 	mutex_unlock(&root->delalloc_mutex);
9345 	return ret;
9346 }
9347 
9348 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
9349 {
9350 	struct writeback_control wbc = {
9351 		.nr_to_write = LONG_MAX,
9352 		.sync_mode = WB_SYNC_NONE,
9353 		.range_start = 0,
9354 		.range_end = LLONG_MAX,
9355 	};
9356 	struct btrfs_fs_info *fs_info = root->fs_info;
9357 
9358 	if (BTRFS_FS_ERROR(fs_info))
9359 		return -EROFS;
9360 
9361 	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
9362 }
9363 
9364 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
9365 			       bool in_reclaim_context)
9366 {
9367 	struct writeback_control wbc = {
9368 		.nr_to_write = nr,
9369 		.sync_mode = WB_SYNC_NONE,
9370 		.range_start = 0,
9371 		.range_end = LLONG_MAX,
9372 	};
9373 	struct btrfs_root *root;
9374 	LIST_HEAD(splice);
9375 	int ret;
9376 
9377 	if (BTRFS_FS_ERROR(fs_info))
9378 		return -EROFS;
9379 
9380 	mutex_lock(&fs_info->delalloc_root_mutex);
9381 	spin_lock(&fs_info->delalloc_root_lock);
9382 	list_splice_init(&fs_info->delalloc_roots, &splice);
9383 	while (!list_empty(&splice)) {
9384 		/*
9385 		 * Reset nr_to_write here so we know that we're doing a full
9386 		 * flush.
9387 		 */
9388 		if (nr == LONG_MAX)
9389 			wbc.nr_to_write = LONG_MAX;
9390 
9391 		root = list_first_entry(&splice, struct btrfs_root,
9392 					delalloc_root);
9393 		root = btrfs_grab_root(root);
9394 		BUG_ON(!root);
9395 		list_move_tail(&root->delalloc_root,
9396 			       &fs_info->delalloc_roots);
9397 		spin_unlock(&fs_info->delalloc_root_lock);
9398 
9399 		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
9400 		btrfs_put_root(root);
9401 		if (ret < 0 || wbc.nr_to_write <= 0)
9402 			goto out;
9403 		spin_lock(&fs_info->delalloc_root_lock);
9404 	}
9405 	spin_unlock(&fs_info->delalloc_root_lock);
9406 
9407 	ret = 0;
9408 out:
9409 	if (!list_empty(&splice)) {
9410 		spin_lock(&fs_info->delalloc_root_lock);
9411 		list_splice_tail(&splice, &fs_info->delalloc_roots);
9412 		spin_unlock(&fs_info->delalloc_root_lock);
9413 	}
9414 	mutex_unlock(&fs_info->delalloc_root_mutex);
9415 	return ret;
9416 }
9417 
9418 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
9419 			 struct dentry *dentry, const char *symname)
9420 {
9421 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9422 	struct btrfs_trans_handle *trans;
9423 	struct btrfs_root *root = BTRFS_I(dir)->root;
9424 	struct btrfs_path *path;
9425 	struct btrfs_key key;
9426 	struct inode *inode;
9427 	struct btrfs_new_inode_args new_inode_args = {
9428 		.dir = dir,
9429 		.dentry = dentry,
9430 	};
9431 	unsigned int trans_num_items;
9432 	int err;
9433 	int name_len;
9434 	int datasize;
9435 	unsigned long ptr;
9436 	struct btrfs_file_extent_item *ei;
9437 	struct extent_buffer *leaf;
9438 
9439 	name_len = strlen(symname);
9440 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
9441 		return -ENAMETOOLONG;
9442 
9443 	inode = new_inode(dir->i_sb);
9444 	if (!inode)
9445 		return -ENOMEM;
9446 	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
9447 	inode->i_op = &btrfs_symlink_inode_operations;
9448 	inode_nohighmem(inode);
9449 	inode->i_mapping->a_ops = &btrfs_aops;
9450 	btrfs_i_size_write(BTRFS_I(inode), name_len);
9451 	inode_set_bytes(inode, name_len);
9452 
9453 	new_inode_args.inode = inode;
9454 	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9455 	if (err)
9456 		goto out_inode;
9457 	/* 1 additional item for the inline extent */
9458 	trans_num_items++;
9459 
9460 	trans = btrfs_start_transaction(root, trans_num_items);
9461 	if (IS_ERR(trans)) {
9462 		err = PTR_ERR(trans);
9463 		goto out_new_inode_args;
9464 	}
9465 
9466 	err = btrfs_create_new_inode(trans, &new_inode_args);
9467 	if (err)
9468 		goto out;
9469 
9470 	path = btrfs_alloc_path();
9471 	if (!path) {
9472 		err = -ENOMEM;
9473 		btrfs_abort_transaction(trans, err);
9474 		discard_new_inode(inode);
9475 		inode = NULL;
9476 		goto out;
9477 	}
9478 	key.objectid = btrfs_ino(BTRFS_I(inode));
9479 	key.offset = 0;
9480 	key.type = BTRFS_EXTENT_DATA_KEY;
9481 	datasize = btrfs_file_extent_calc_inline_size(name_len);
9482 	err = btrfs_insert_empty_item(trans, root, path, &key,
9483 				      datasize);
9484 	if (err) {
9485 		btrfs_abort_transaction(trans, err);
9486 		btrfs_free_path(path);
9487 		discard_new_inode(inode);
9488 		inode = NULL;
9489 		goto out;
9490 	}
9491 	leaf = path->nodes[0];
9492 	ei = btrfs_item_ptr(leaf, path->slots[0],
9493 			    struct btrfs_file_extent_item);
9494 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9495 	btrfs_set_file_extent_type(leaf, ei,
9496 				   BTRFS_FILE_EXTENT_INLINE);
9497 	btrfs_set_file_extent_encryption(leaf, ei, 0);
9498 	btrfs_set_file_extent_compression(leaf, ei, 0);
9499 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9500 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9501 
9502 	ptr = btrfs_file_extent_inline_start(ei);
9503 	write_extent_buffer(leaf, symname, ptr, name_len);
9504 	btrfs_mark_buffer_dirty(trans, leaf);
9505 	btrfs_free_path(path);
9506 
9507 	d_instantiate_new(dentry, inode);
9508 	err = 0;
9509 out:
9510 	btrfs_end_transaction(trans);
9511 	btrfs_btree_balance_dirty(fs_info);
9512 out_new_inode_args:
9513 	btrfs_new_inode_args_destroy(&new_inode_args);
9514 out_inode:
9515 	if (err)
9516 		iput(inode);
9517 	return err;
9518 }
9519 
9520 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9521 				       struct btrfs_trans_handle *trans_in,
9522 				       struct btrfs_inode *inode,
9523 				       struct btrfs_key *ins,
9524 				       u64 file_offset)
9525 {
9526 	struct btrfs_file_extent_item stack_fi;
9527 	struct btrfs_replace_extent_info extent_info;
9528 	struct btrfs_trans_handle *trans = trans_in;
9529 	struct btrfs_path *path;
9530 	u64 start = ins->objectid;
9531 	u64 len = ins->offset;
9532 	u64 qgroup_released = 0;
9533 	int ret;
9534 
9535 	memset(&stack_fi, 0, sizeof(stack_fi));
9536 
9537 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9538 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9539 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9540 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9541 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9542 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9543 	/* Encryption and other encoding is reserved and all 0 */
9544 
9545 	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
9546 	if (ret < 0)
9547 		return ERR_PTR(ret);
9548 
9549 	if (trans) {
9550 		ret = insert_reserved_file_extent(trans, inode,
9551 						  file_offset, &stack_fi,
9552 						  true, qgroup_released);
9553 		if (ret)
9554 			goto free_qgroup;
9555 		return trans;
9556 	}
9557 
9558 	extent_info.disk_offset = start;
9559 	extent_info.disk_len = len;
9560 	extent_info.data_offset = 0;
9561 	extent_info.data_len = len;
9562 	extent_info.file_offset = file_offset;
9563 	extent_info.extent_buf = (char *)&stack_fi;
9564 	extent_info.is_new_extent = true;
9565 	extent_info.update_times = true;
9566 	extent_info.qgroup_reserved = qgroup_released;
9567 	extent_info.insertions = 0;
9568 
9569 	path = btrfs_alloc_path();
9570 	if (!path) {
9571 		ret = -ENOMEM;
9572 		goto free_qgroup;
9573 	}
9574 
9575 	ret = btrfs_replace_file_extents(inode, path, file_offset,
9576 				     file_offset + len - 1, &extent_info,
9577 				     &trans);
9578 	btrfs_free_path(path);
9579 	if (ret)
9580 		goto free_qgroup;
9581 	return trans;
9582 
9583 free_qgroup:
9584 	/*
9585 	 * We have released qgroup data range at the beginning of the function,
9586 	 * and normally qgroup_released bytes will be freed when committing
9587 	 * transaction.
9588 	 * But if we error out early, we have to free what we have released
9589 	 * or we leak qgroup data reservation.
9590 	 */
9591 	btrfs_qgroup_free_refroot(inode->root->fs_info,
9592 			inode->root->root_key.objectid, qgroup_released,
9593 			BTRFS_QGROUP_RSV_DATA);
9594 	return ERR_PTR(ret);
9595 }
9596 
9597 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9598 				       u64 start, u64 num_bytes, u64 min_size,
9599 				       loff_t actual_len, u64 *alloc_hint,
9600 				       struct btrfs_trans_handle *trans)
9601 {
9602 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
9603 	struct extent_map *em;
9604 	struct btrfs_root *root = BTRFS_I(inode)->root;
9605 	struct btrfs_key ins;
9606 	u64 cur_offset = start;
9607 	u64 clear_offset = start;
9608 	u64 i_size;
9609 	u64 cur_bytes;
9610 	u64 last_alloc = (u64)-1;
9611 	int ret = 0;
9612 	bool own_trans = true;
9613 	u64 end = start + num_bytes - 1;
9614 
9615 	if (trans)
9616 		own_trans = false;
9617 	while (num_bytes > 0) {
9618 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9619 		cur_bytes = max(cur_bytes, min_size);
9620 		/*
9621 		 * If we are severely fragmented we could end up with really
9622 		 * small allocations, so if the allocator is returning small
9623 		 * chunks lets make its job easier by only searching for those
9624 		 * sized chunks.
9625 		 */
9626 		cur_bytes = min(cur_bytes, last_alloc);
9627 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9628 				min_size, 0, *alloc_hint, &ins, 1, 0);
9629 		if (ret)
9630 			break;
9631 
9632 		/*
9633 		 * We've reserved this space, and thus converted it from
9634 		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
9635 		 * from here on out we will only need to clear our reservation
9636 		 * for the remaining unreserved area, so advance our
9637 		 * clear_offset by our extent size.
9638 		 */
9639 		clear_offset += ins.offset;
9640 
9641 		last_alloc = ins.offset;
9642 		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9643 						    &ins, cur_offset);
9644 		/*
9645 		 * Now that we inserted the prealloc extent we can finally
9646 		 * decrement the number of reservations in the block group.
9647 		 * If we did it before, we could race with relocation and have
9648 		 * relocation miss the reserved extent, making it fail later.
9649 		 */
9650 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9651 		if (IS_ERR(trans)) {
9652 			ret = PTR_ERR(trans);
9653 			btrfs_free_reserved_extent(fs_info, ins.objectid,
9654 						   ins.offset, 0);
9655 			break;
9656 		}
9657 
9658 		em = alloc_extent_map();
9659 		if (!em) {
9660 			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
9661 					    cur_offset + ins.offset - 1, false);
9662 			btrfs_set_inode_full_sync(BTRFS_I(inode));
9663 			goto next;
9664 		}
9665 
9666 		em->start = cur_offset;
9667 		em->orig_start = cur_offset;
9668 		em->len = ins.offset;
9669 		em->block_start = ins.objectid;
9670 		em->block_len = ins.offset;
9671 		em->orig_block_len = ins.offset;
9672 		em->ram_bytes = ins.offset;
9673 		em->flags |= EXTENT_FLAG_PREALLOC;
9674 		em->generation = trans->transid;
9675 
9676 		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
9677 		free_extent_map(em);
9678 next:
9679 		num_bytes -= ins.offset;
9680 		cur_offset += ins.offset;
9681 		*alloc_hint = ins.objectid + ins.offset;
9682 
9683 		inode_inc_iversion(inode);
9684 		inode_set_ctime_current(inode);
9685 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9686 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9687 		    (actual_len > inode->i_size) &&
9688 		    (cur_offset > inode->i_size)) {
9689 			if (cur_offset > actual_len)
9690 				i_size = actual_len;
9691 			else
9692 				i_size = cur_offset;
9693 			i_size_write(inode, i_size);
9694 			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
9695 		}
9696 
9697 		ret = btrfs_update_inode(trans, BTRFS_I(inode));
9698 
9699 		if (ret) {
9700 			btrfs_abort_transaction(trans, ret);
9701 			if (own_trans)
9702 				btrfs_end_transaction(trans);
9703 			break;
9704 		}
9705 
9706 		if (own_trans) {
9707 			btrfs_end_transaction(trans);
9708 			trans = NULL;
9709 		}
9710 	}
9711 	if (clear_offset < end)
9712 		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
9713 			end - clear_offset + 1);
9714 	return ret;
9715 }
9716 
9717 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9718 			      u64 start, u64 num_bytes, u64 min_size,
9719 			      loff_t actual_len, u64 *alloc_hint)
9720 {
9721 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9722 					   min_size, actual_len, alloc_hint,
9723 					   NULL);
9724 }
9725 
9726 int btrfs_prealloc_file_range_trans(struct inode *inode,
9727 				    struct btrfs_trans_handle *trans, int mode,
9728 				    u64 start, u64 num_bytes, u64 min_size,
9729 				    loff_t actual_len, u64 *alloc_hint)
9730 {
9731 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9732 					   min_size, actual_len, alloc_hint, trans);
9733 }
9734 
9735 static int btrfs_permission(struct mnt_idmap *idmap,
9736 			    struct inode *inode, int mask)
9737 {
9738 	struct btrfs_root *root = BTRFS_I(inode)->root;
9739 	umode_t mode = inode->i_mode;
9740 
9741 	if (mask & MAY_WRITE &&
9742 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9743 		if (btrfs_root_readonly(root))
9744 			return -EROFS;
9745 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9746 			return -EACCES;
9747 	}
9748 	return generic_permission(idmap, inode, mask);
9749 }
9750 
9751 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
9752 			 struct file *file, umode_t mode)
9753 {
9754 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
9755 	struct btrfs_trans_handle *trans;
9756 	struct btrfs_root *root = BTRFS_I(dir)->root;
9757 	struct inode *inode;
9758 	struct btrfs_new_inode_args new_inode_args = {
9759 		.dir = dir,
9760 		.dentry = file->f_path.dentry,
9761 		.orphan = true,
9762 	};
9763 	unsigned int trans_num_items;
9764 	int ret;
9765 
9766 	inode = new_inode(dir->i_sb);
9767 	if (!inode)
9768 		return -ENOMEM;
9769 	inode_init_owner(idmap, inode, dir, mode);
9770 	inode->i_fop = &btrfs_file_operations;
9771 	inode->i_op = &btrfs_file_inode_operations;
9772 	inode->i_mapping->a_ops = &btrfs_aops;
9773 
9774 	new_inode_args.inode = inode;
9775 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9776 	if (ret)
9777 		goto out_inode;
9778 
9779 	trans = btrfs_start_transaction(root, trans_num_items);
9780 	if (IS_ERR(trans)) {
9781 		ret = PTR_ERR(trans);
9782 		goto out_new_inode_args;
9783 	}
9784 
9785 	ret = btrfs_create_new_inode(trans, &new_inode_args);
9786 
9787 	/*
9788 	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
9789 	 * set it to 1 because d_tmpfile() will issue a warning if the count is
9790 	 * 0, through:
9791 	 *
9792 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9793 	 */
9794 	set_nlink(inode, 1);
9795 
9796 	if (!ret) {
9797 		d_tmpfile(file, inode);
9798 		unlock_new_inode(inode);
9799 		mark_inode_dirty(inode);
9800 	}
9801 
9802 	btrfs_end_transaction(trans);
9803 	btrfs_btree_balance_dirty(fs_info);
9804 out_new_inode_args:
9805 	btrfs_new_inode_args_destroy(&new_inode_args);
9806 out_inode:
9807 	if (ret)
9808 		iput(inode);
9809 	return finish_open_simple(file, ret);
9810 }
9811 
9812 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
9813 {
9814 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
9815 	unsigned long index = start >> PAGE_SHIFT;
9816 	unsigned long end_index = end >> PAGE_SHIFT;
9817 	struct page *page;
9818 	u32 len;
9819 
9820 	ASSERT(end + 1 - start <= U32_MAX);
9821 	len = end + 1 - start;
9822 	while (index <= end_index) {
9823 		page = find_get_page(inode->vfs_inode.i_mapping, index);
9824 		ASSERT(page); /* Pages should be in the extent_io_tree */
9825 
9826 		/* This is for data, which doesn't yet support larger folio. */
9827 		ASSERT(folio_order(page_folio(page)) == 0);
9828 		btrfs_folio_set_writeback(fs_info, page_folio(page), start, len);
9829 		put_page(page);
9830 		index++;
9831 	}
9832 }
9833 
9834 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
9835 					     int compress_type)
9836 {
9837 	switch (compress_type) {
9838 	case BTRFS_COMPRESS_NONE:
9839 		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
9840 	case BTRFS_COMPRESS_ZLIB:
9841 		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
9842 	case BTRFS_COMPRESS_LZO:
9843 		/*
9844 		 * The LZO format depends on the sector size. 64K is the maximum
9845 		 * sector size that we support.
9846 		 */
9847 		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
9848 			return -EINVAL;
9849 		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
9850 		       (fs_info->sectorsize_bits - 12);
9851 	case BTRFS_COMPRESS_ZSTD:
9852 		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
9853 	default:
9854 		return -EUCLEAN;
9855 	}
9856 }
9857 
9858 static ssize_t btrfs_encoded_read_inline(
9859 				struct kiocb *iocb,
9860 				struct iov_iter *iter, u64 start,
9861 				u64 lockend,
9862 				struct extent_state **cached_state,
9863 				u64 extent_start, size_t count,
9864 				struct btrfs_ioctl_encoded_io_args *encoded,
9865 				bool *unlocked)
9866 {
9867 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9868 	struct btrfs_root *root = inode->root;
9869 	struct btrfs_fs_info *fs_info = root->fs_info;
9870 	struct extent_io_tree *io_tree = &inode->io_tree;
9871 	struct btrfs_path *path;
9872 	struct extent_buffer *leaf;
9873 	struct btrfs_file_extent_item *item;
9874 	u64 ram_bytes;
9875 	unsigned long ptr;
9876 	void *tmp;
9877 	ssize_t ret;
9878 
9879 	path = btrfs_alloc_path();
9880 	if (!path) {
9881 		ret = -ENOMEM;
9882 		goto out;
9883 	}
9884 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9885 				       extent_start, 0);
9886 	if (ret) {
9887 		if (ret > 0) {
9888 			/* The extent item disappeared? */
9889 			ret = -EIO;
9890 		}
9891 		goto out;
9892 	}
9893 	leaf = path->nodes[0];
9894 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9895 
9896 	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9897 	ptr = btrfs_file_extent_inline_start(item);
9898 
9899 	encoded->len = min_t(u64, extent_start + ram_bytes,
9900 			     inode->vfs_inode.i_size) - iocb->ki_pos;
9901 	ret = btrfs_encoded_io_compression_from_extent(fs_info,
9902 				 btrfs_file_extent_compression(leaf, item));
9903 	if (ret < 0)
9904 		goto out;
9905 	encoded->compression = ret;
9906 	if (encoded->compression) {
9907 		size_t inline_size;
9908 
9909 		inline_size = btrfs_file_extent_inline_item_len(leaf,
9910 								path->slots[0]);
9911 		if (inline_size > count) {
9912 			ret = -ENOBUFS;
9913 			goto out;
9914 		}
9915 		count = inline_size;
9916 		encoded->unencoded_len = ram_bytes;
9917 		encoded->unencoded_offset = iocb->ki_pos - extent_start;
9918 	} else {
9919 		count = min_t(u64, count, encoded->len);
9920 		encoded->len = count;
9921 		encoded->unencoded_len = count;
9922 		ptr += iocb->ki_pos - extent_start;
9923 	}
9924 
9925 	tmp = kmalloc(count, GFP_NOFS);
9926 	if (!tmp) {
9927 		ret = -ENOMEM;
9928 		goto out;
9929 	}
9930 	read_extent_buffer(leaf, tmp, ptr, count);
9931 	btrfs_release_path(path);
9932 	unlock_extent(io_tree, start, lockend, cached_state);
9933 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9934 	*unlocked = true;
9935 
9936 	ret = copy_to_iter(tmp, count, iter);
9937 	if (ret != count)
9938 		ret = -EFAULT;
9939 	kfree(tmp);
9940 out:
9941 	btrfs_free_path(path);
9942 	return ret;
9943 }
9944 
9945 struct btrfs_encoded_read_private {
9946 	wait_queue_head_t wait;
9947 	atomic_t pending;
9948 	blk_status_t status;
9949 };
9950 
9951 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
9952 {
9953 	struct btrfs_encoded_read_private *priv = bbio->private;
9954 
9955 	if (bbio->bio.bi_status) {
9956 		/*
9957 		 * The memory barrier implied by the atomic_dec_return() here
9958 		 * pairs with the memory barrier implied by the
9959 		 * atomic_dec_return() or io_wait_event() in
9960 		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
9961 		 * write is observed before the load of status in
9962 		 * btrfs_encoded_read_regular_fill_pages().
9963 		 */
9964 		WRITE_ONCE(priv->status, bbio->bio.bi_status);
9965 	}
9966 	if (!atomic_dec_return(&priv->pending))
9967 		wake_up(&priv->wait);
9968 	bio_put(&bbio->bio);
9969 }
9970 
9971 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
9972 					  u64 file_offset, u64 disk_bytenr,
9973 					  u64 disk_io_size, struct page **pages)
9974 {
9975 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
9976 	struct btrfs_encoded_read_private priv = {
9977 		.pending = ATOMIC_INIT(1),
9978 	};
9979 	unsigned long i = 0;
9980 	struct btrfs_bio *bbio;
9981 
9982 	init_waitqueue_head(&priv.wait);
9983 
9984 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
9985 			       btrfs_encoded_read_endio, &priv);
9986 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9987 	bbio->inode = inode;
9988 
9989 	do {
9990 		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
9991 
9992 		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
9993 			atomic_inc(&priv.pending);
9994 			btrfs_submit_bio(bbio, 0);
9995 
9996 			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
9997 					       btrfs_encoded_read_endio, &priv);
9998 			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9999 			bbio->inode = inode;
10000 			continue;
10001 		}
10002 
10003 		i++;
10004 		disk_bytenr += bytes;
10005 		disk_io_size -= bytes;
10006 	} while (disk_io_size);
10007 
10008 	atomic_inc(&priv.pending);
10009 	btrfs_submit_bio(bbio, 0);
10010 
10011 	if (atomic_dec_return(&priv.pending))
10012 		io_wait_event(priv.wait, !atomic_read(&priv.pending));
10013 	/* See btrfs_encoded_read_endio() for ordering. */
10014 	return blk_status_to_errno(READ_ONCE(priv.status));
10015 }
10016 
10017 static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
10018 					  struct iov_iter *iter,
10019 					  u64 start, u64 lockend,
10020 					  struct extent_state **cached_state,
10021 					  u64 disk_bytenr, u64 disk_io_size,
10022 					  size_t count, bool compressed,
10023 					  bool *unlocked)
10024 {
10025 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10026 	struct extent_io_tree *io_tree = &inode->io_tree;
10027 	struct page **pages;
10028 	unsigned long nr_pages, i;
10029 	u64 cur;
10030 	size_t page_offset;
10031 	ssize_t ret;
10032 
10033 	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
10034 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
10035 	if (!pages)
10036 		return -ENOMEM;
10037 	ret = btrfs_alloc_page_array(nr_pages, pages, 0);
10038 	if (ret) {
10039 		ret = -ENOMEM;
10040 		goto out;
10041 		}
10042 
10043 	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
10044 						    disk_io_size, pages);
10045 	if (ret)
10046 		goto out;
10047 
10048 	unlock_extent(io_tree, start, lockend, cached_state);
10049 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10050 	*unlocked = true;
10051 
10052 	if (compressed) {
10053 		i = 0;
10054 		page_offset = 0;
10055 	} else {
10056 		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
10057 		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
10058 	}
10059 	cur = 0;
10060 	while (cur < count) {
10061 		size_t bytes = min_t(size_t, count - cur,
10062 				     PAGE_SIZE - page_offset);
10063 
10064 		if (copy_page_to_iter(pages[i], page_offset, bytes,
10065 				      iter) != bytes) {
10066 			ret = -EFAULT;
10067 			goto out;
10068 		}
10069 		i++;
10070 		cur += bytes;
10071 		page_offset = 0;
10072 	}
10073 	ret = count;
10074 out:
10075 	for (i = 0; i < nr_pages; i++) {
10076 		if (pages[i])
10077 			__free_page(pages[i]);
10078 	}
10079 	kfree(pages);
10080 	return ret;
10081 }
10082 
10083 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
10084 			   struct btrfs_ioctl_encoded_io_args *encoded)
10085 {
10086 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10087 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
10088 	struct extent_io_tree *io_tree = &inode->io_tree;
10089 	ssize_t ret;
10090 	size_t count = iov_iter_count(iter);
10091 	u64 start, lockend, disk_bytenr, disk_io_size;
10092 	struct extent_state *cached_state = NULL;
10093 	struct extent_map *em;
10094 	bool unlocked = false;
10095 
10096 	file_accessed(iocb->ki_filp);
10097 
10098 	btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
10099 
10100 	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
10101 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10102 		return 0;
10103 	}
10104 	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
10105 	/*
10106 	 * We don't know how long the extent containing iocb->ki_pos is, but if
10107 	 * it's compressed we know that it won't be longer than this.
10108 	 */
10109 	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
10110 
10111 	for (;;) {
10112 		struct btrfs_ordered_extent *ordered;
10113 
10114 		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
10115 					       lockend - start + 1);
10116 		if (ret)
10117 			goto out_unlock_inode;
10118 		lock_extent(io_tree, start, lockend, &cached_state);
10119 		ordered = btrfs_lookup_ordered_range(inode, start,
10120 						     lockend - start + 1);
10121 		if (!ordered)
10122 			break;
10123 		btrfs_put_ordered_extent(ordered);
10124 		unlock_extent(io_tree, start, lockend, &cached_state);
10125 		cond_resched();
10126 	}
10127 
10128 	em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1);
10129 	if (IS_ERR(em)) {
10130 		ret = PTR_ERR(em);
10131 		goto out_unlock_extent;
10132 	}
10133 
10134 	if (em->block_start == EXTENT_MAP_INLINE) {
10135 		u64 extent_start = em->start;
10136 
10137 		/*
10138 		 * For inline extents we get everything we need out of the
10139 		 * extent item.
10140 		 */
10141 		free_extent_map(em);
10142 		em = NULL;
10143 		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
10144 						&cached_state, extent_start,
10145 						count, encoded, &unlocked);
10146 		goto out;
10147 	}
10148 
10149 	/*
10150 	 * We only want to return up to EOF even if the extent extends beyond
10151 	 * that.
10152 	 */
10153 	encoded->len = min_t(u64, extent_map_end(em),
10154 			     inode->vfs_inode.i_size) - iocb->ki_pos;
10155 	if (em->block_start == EXTENT_MAP_HOLE ||
10156 	    (em->flags & EXTENT_FLAG_PREALLOC)) {
10157 		disk_bytenr = EXTENT_MAP_HOLE;
10158 		count = min_t(u64, count, encoded->len);
10159 		encoded->len = count;
10160 		encoded->unencoded_len = count;
10161 	} else if (extent_map_is_compressed(em)) {
10162 		disk_bytenr = em->block_start;
10163 		/*
10164 		 * Bail if the buffer isn't large enough to return the whole
10165 		 * compressed extent.
10166 		 */
10167 		if (em->block_len > count) {
10168 			ret = -ENOBUFS;
10169 			goto out_em;
10170 		}
10171 		disk_io_size = em->block_len;
10172 		count = em->block_len;
10173 		encoded->unencoded_len = em->ram_bytes;
10174 		encoded->unencoded_offset = iocb->ki_pos - em->orig_start;
10175 		ret = btrfs_encoded_io_compression_from_extent(fs_info,
10176 							       extent_map_compression(em));
10177 		if (ret < 0)
10178 			goto out_em;
10179 		encoded->compression = ret;
10180 	} else {
10181 		disk_bytenr = em->block_start + (start - em->start);
10182 		if (encoded->len > count)
10183 			encoded->len = count;
10184 		/*
10185 		 * Don't read beyond what we locked. This also limits the page
10186 		 * allocations that we'll do.
10187 		 */
10188 		disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
10189 		count = start + disk_io_size - iocb->ki_pos;
10190 		encoded->len = count;
10191 		encoded->unencoded_len = count;
10192 		disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize);
10193 	}
10194 	free_extent_map(em);
10195 	em = NULL;
10196 
10197 	if (disk_bytenr == EXTENT_MAP_HOLE) {
10198 		unlock_extent(io_tree, start, lockend, &cached_state);
10199 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10200 		unlocked = true;
10201 		ret = iov_iter_zero(count, iter);
10202 		if (ret != count)
10203 			ret = -EFAULT;
10204 	} else {
10205 		ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
10206 						 &cached_state, disk_bytenr,
10207 						 disk_io_size, count,
10208 						 encoded->compression,
10209 						 &unlocked);
10210 	}
10211 
10212 out:
10213 	if (ret >= 0)
10214 		iocb->ki_pos += encoded->len;
10215 out_em:
10216 	free_extent_map(em);
10217 out_unlock_extent:
10218 	if (!unlocked)
10219 		unlock_extent(io_tree, start, lockend, &cached_state);
10220 out_unlock_inode:
10221 	if (!unlocked)
10222 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
10223 	return ret;
10224 }
10225 
10226 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
10227 			       const struct btrfs_ioctl_encoded_io_args *encoded)
10228 {
10229 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
10230 	struct btrfs_root *root = inode->root;
10231 	struct btrfs_fs_info *fs_info = root->fs_info;
10232 	struct extent_io_tree *io_tree = &inode->io_tree;
10233 	struct extent_changeset *data_reserved = NULL;
10234 	struct extent_state *cached_state = NULL;
10235 	struct btrfs_ordered_extent *ordered;
10236 	int compression;
10237 	size_t orig_count;
10238 	u64 start, end;
10239 	u64 num_bytes, ram_bytes, disk_num_bytes;
10240 	unsigned long nr_pages, i;
10241 	struct page **pages;
10242 	struct btrfs_key ins;
10243 	bool extent_reserved = false;
10244 	struct extent_map *em;
10245 	ssize_t ret;
10246 
10247 	switch (encoded->compression) {
10248 	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
10249 		compression = BTRFS_COMPRESS_ZLIB;
10250 		break;
10251 	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
10252 		compression = BTRFS_COMPRESS_ZSTD;
10253 		break;
10254 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
10255 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
10256 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
10257 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
10258 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
10259 		/* The sector size must match for LZO. */
10260 		if (encoded->compression -
10261 		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
10262 		    fs_info->sectorsize_bits)
10263 			return -EINVAL;
10264 		compression = BTRFS_COMPRESS_LZO;
10265 		break;
10266 	default:
10267 		return -EINVAL;
10268 	}
10269 	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
10270 		return -EINVAL;
10271 
10272 	orig_count = iov_iter_count(from);
10273 
10274 	/* The extent size must be sane. */
10275 	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
10276 	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
10277 		return -EINVAL;
10278 
10279 	/*
10280 	 * The compressed data must be smaller than the decompressed data.
10281 	 *
10282 	 * It's of course possible for data to compress to larger or the same
10283 	 * size, but the buffered I/O path falls back to no compression for such
10284 	 * data, and we don't want to break any assumptions by creating these
10285 	 * extents.
10286 	 *
10287 	 * Note that this is less strict than the current check we have that the
10288 	 * compressed data must be at least one sector smaller than the
10289 	 * decompressed data. We only want to enforce the weaker requirement
10290 	 * from old kernels that it is at least one byte smaller.
10291 	 */
10292 	if (orig_count >= encoded->unencoded_len)
10293 		return -EINVAL;
10294 
10295 	/* The extent must start on a sector boundary. */
10296 	start = iocb->ki_pos;
10297 	if (!IS_ALIGNED(start, fs_info->sectorsize))
10298 		return -EINVAL;
10299 
10300 	/*
10301 	 * The extent must end on a sector boundary. However, we allow a write
10302 	 * which ends at or extends i_size to have an unaligned length; we round
10303 	 * up the extent size and set i_size to the unaligned end.
10304 	 */
10305 	if (start + encoded->len < inode->vfs_inode.i_size &&
10306 	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
10307 		return -EINVAL;
10308 
10309 	/* Finally, the offset in the unencoded data must be sector-aligned. */
10310 	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
10311 		return -EINVAL;
10312 
10313 	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
10314 	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
10315 	end = start + num_bytes - 1;
10316 
10317 	/*
10318 	 * If the extent cannot be inline, the compressed data on disk must be
10319 	 * sector-aligned. For convenience, we extend it with zeroes if it
10320 	 * isn't.
10321 	 */
10322 	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
10323 	nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
10324 	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
10325 	if (!pages)
10326 		return -ENOMEM;
10327 	for (i = 0; i < nr_pages; i++) {
10328 		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
10329 		char *kaddr;
10330 
10331 		pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
10332 		if (!pages[i]) {
10333 			ret = -ENOMEM;
10334 			goto out_pages;
10335 		}
10336 		kaddr = kmap_local_page(pages[i]);
10337 		if (copy_from_iter(kaddr, bytes, from) != bytes) {
10338 			kunmap_local(kaddr);
10339 			ret = -EFAULT;
10340 			goto out_pages;
10341 		}
10342 		if (bytes < PAGE_SIZE)
10343 			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
10344 		kunmap_local(kaddr);
10345 	}
10346 
10347 	for (;;) {
10348 		struct btrfs_ordered_extent *ordered;
10349 
10350 		ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
10351 		if (ret)
10352 			goto out_pages;
10353 		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10354 						    start >> PAGE_SHIFT,
10355 						    end >> PAGE_SHIFT);
10356 		if (ret)
10357 			goto out_pages;
10358 		lock_extent(io_tree, start, end, &cached_state);
10359 		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10360 		if (!ordered &&
10361 		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10362 			break;
10363 		if (ordered)
10364 			btrfs_put_ordered_extent(ordered);
10365 		unlock_extent(io_tree, start, end, &cached_state);
10366 		cond_resched();
10367 	}
10368 
10369 	/*
10370 	 * We don't use the higher-level delalloc space functions because our
10371 	 * num_bytes and disk_num_bytes are different.
10372 	 */
10373 	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10374 	if (ret)
10375 		goto out_unlock;
10376 	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10377 	if (ret)
10378 		goto out_free_data_space;
10379 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10380 					      false);
10381 	if (ret)
10382 		goto out_qgroup_free_data;
10383 
10384 	/* Try an inline extent first. */
10385 	if (start == 0 && encoded->unencoded_len == encoded->len &&
10386 	    encoded->unencoded_offset == 0) {
10387 		ret = cow_file_range_inline(inode, encoded->len, orig_count,
10388 					    compression, pages, true);
10389 		if (ret <= 0) {
10390 			if (ret == 0)
10391 				ret = orig_count;
10392 			goto out_delalloc_release;
10393 		}
10394 	}
10395 
10396 	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10397 				   disk_num_bytes, 0, 0, &ins, 1, 1);
10398 	if (ret)
10399 		goto out_delalloc_release;
10400 	extent_reserved = true;
10401 
10402 	em = create_io_em(inode, start, num_bytes,
10403 			  start - encoded->unencoded_offset, ins.objectid,
10404 			  ins.offset, ins.offset, ram_bytes, compression,
10405 			  BTRFS_ORDERED_COMPRESSED);
10406 	if (IS_ERR(em)) {
10407 		ret = PTR_ERR(em);
10408 		goto out_free_reserved;
10409 	}
10410 	free_extent_map(em);
10411 
10412 	ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes,
10413 				       ins.objectid, ins.offset,
10414 				       encoded->unencoded_offset,
10415 				       (1 << BTRFS_ORDERED_ENCODED) |
10416 				       (1 << BTRFS_ORDERED_COMPRESSED),
10417 				       compression);
10418 	if (IS_ERR(ordered)) {
10419 		btrfs_drop_extent_map_range(inode, start, end, false);
10420 		ret = PTR_ERR(ordered);
10421 		goto out_free_reserved;
10422 	}
10423 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10424 
10425 	if (start + encoded->len > inode->vfs_inode.i_size)
10426 		i_size_write(&inode->vfs_inode, start + encoded->len);
10427 
10428 	unlock_extent(io_tree, start, end, &cached_state);
10429 
10430 	btrfs_delalloc_release_extents(inode, num_bytes);
10431 
10432 	btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false);
10433 	ret = orig_count;
10434 	goto out;
10435 
10436 out_free_reserved:
10437 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10438 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
10439 out_delalloc_release:
10440 	btrfs_delalloc_release_extents(inode, num_bytes);
10441 	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
10442 out_qgroup_free_data:
10443 	if (ret < 0)
10444 		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
10445 out_free_data_space:
10446 	/*
10447 	 * If btrfs_reserve_extent() succeeded, then we already decremented
10448 	 * bytes_may_use.
10449 	 */
10450 	if (!extent_reserved)
10451 		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
10452 out_unlock:
10453 	unlock_extent(io_tree, start, end, &cached_state);
10454 out_pages:
10455 	for (i = 0; i < nr_pages; i++) {
10456 		if (pages[i])
10457 			__free_page(pages[i]);
10458 	}
10459 	kvfree(pages);
10460 out:
10461 	if (ret >= 0)
10462 		iocb->ki_pos += encoded->len;
10463 	return ret;
10464 }
10465 
10466 #ifdef CONFIG_SWAP
10467 /*
10468  * Add an entry indicating a block group or device which is pinned by a
10469  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10470  * negative errno on failure.
10471  */
10472 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10473 				  bool is_block_group)
10474 {
10475 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10476 	struct btrfs_swapfile_pin *sp, *entry;
10477 	struct rb_node **p;
10478 	struct rb_node *parent = NULL;
10479 
10480 	sp = kmalloc(sizeof(*sp), GFP_NOFS);
10481 	if (!sp)
10482 		return -ENOMEM;
10483 	sp->ptr = ptr;
10484 	sp->inode = inode;
10485 	sp->is_block_group = is_block_group;
10486 	sp->bg_extent_count = 1;
10487 
10488 	spin_lock(&fs_info->swapfile_pins_lock);
10489 	p = &fs_info->swapfile_pins.rb_node;
10490 	while (*p) {
10491 		parent = *p;
10492 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10493 		if (sp->ptr < entry->ptr ||
10494 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10495 			p = &(*p)->rb_left;
10496 		} else if (sp->ptr > entry->ptr ||
10497 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10498 			p = &(*p)->rb_right;
10499 		} else {
10500 			if (is_block_group)
10501 				entry->bg_extent_count++;
10502 			spin_unlock(&fs_info->swapfile_pins_lock);
10503 			kfree(sp);
10504 			return 1;
10505 		}
10506 	}
10507 	rb_link_node(&sp->node, parent, p);
10508 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10509 	spin_unlock(&fs_info->swapfile_pins_lock);
10510 	return 0;
10511 }
10512 
10513 /* Free all of the entries pinned by this swapfile. */
10514 static void btrfs_free_swapfile_pins(struct inode *inode)
10515 {
10516 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10517 	struct btrfs_swapfile_pin *sp;
10518 	struct rb_node *node, *next;
10519 
10520 	spin_lock(&fs_info->swapfile_pins_lock);
10521 	node = rb_first(&fs_info->swapfile_pins);
10522 	while (node) {
10523 		next = rb_next(node);
10524 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10525 		if (sp->inode == inode) {
10526 			rb_erase(&sp->node, &fs_info->swapfile_pins);
10527 			if (sp->is_block_group) {
10528 				btrfs_dec_block_group_swap_extents(sp->ptr,
10529 							   sp->bg_extent_count);
10530 				btrfs_put_block_group(sp->ptr);
10531 			}
10532 			kfree(sp);
10533 		}
10534 		node = next;
10535 	}
10536 	spin_unlock(&fs_info->swapfile_pins_lock);
10537 }
10538 
10539 struct btrfs_swap_info {
10540 	u64 start;
10541 	u64 block_start;
10542 	u64 block_len;
10543 	u64 lowest_ppage;
10544 	u64 highest_ppage;
10545 	unsigned long nr_pages;
10546 	int nr_extents;
10547 };
10548 
10549 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10550 				 struct btrfs_swap_info *bsi)
10551 {
10552 	unsigned long nr_pages;
10553 	unsigned long max_pages;
10554 	u64 first_ppage, first_ppage_reported, next_ppage;
10555 	int ret;
10556 
10557 	/*
10558 	 * Our swapfile may have had its size extended after the swap header was
10559 	 * written. In that case activating the swapfile should not go beyond
10560 	 * the max size set in the swap header.
10561 	 */
10562 	if (bsi->nr_pages >= sis->max)
10563 		return 0;
10564 
10565 	max_pages = sis->max - bsi->nr_pages;
10566 	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
10567 	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
10568 
10569 	if (first_ppage >= next_ppage)
10570 		return 0;
10571 	nr_pages = next_ppage - first_ppage;
10572 	nr_pages = min(nr_pages, max_pages);
10573 
10574 	first_ppage_reported = first_ppage;
10575 	if (bsi->start == 0)
10576 		first_ppage_reported++;
10577 	if (bsi->lowest_ppage > first_ppage_reported)
10578 		bsi->lowest_ppage = first_ppage_reported;
10579 	if (bsi->highest_ppage < (next_ppage - 1))
10580 		bsi->highest_ppage = next_ppage - 1;
10581 
10582 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10583 	if (ret < 0)
10584 		return ret;
10585 	bsi->nr_extents += ret;
10586 	bsi->nr_pages += nr_pages;
10587 	return 0;
10588 }
10589 
10590 static void btrfs_swap_deactivate(struct file *file)
10591 {
10592 	struct inode *inode = file_inode(file);
10593 
10594 	btrfs_free_swapfile_pins(inode);
10595 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10596 }
10597 
10598 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10599 			       sector_t *span)
10600 {
10601 	struct inode *inode = file_inode(file);
10602 	struct btrfs_root *root = BTRFS_I(inode)->root;
10603 	struct btrfs_fs_info *fs_info = root->fs_info;
10604 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10605 	struct extent_state *cached_state = NULL;
10606 	struct extent_map *em = NULL;
10607 	struct btrfs_chunk_map *map = NULL;
10608 	struct btrfs_device *device = NULL;
10609 	struct btrfs_swap_info bsi = {
10610 		.lowest_ppage = (sector_t)-1ULL,
10611 	};
10612 	int ret = 0;
10613 	u64 isize;
10614 	u64 start;
10615 
10616 	/*
10617 	 * If the swap file was just created, make sure delalloc is done. If the
10618 	 * file changes again after this, the user is doing something stupid and
10619 	 * we don't really care.
10620 	 */
10621 	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
10622 	if (ret)
10623 		return ret;
10624 
10625 	/*
10626 	 * The inode is locked, so these flags won't change after we check them.
10627 	 */
10628 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10629 		btrfs_warn(fs_info, "swapfile must not be compressed");
10630 		return -EINVAL;
10631 	}
10632 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10633 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
10634 		return -EINVAL;
10635 	}
10636 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
10637 		btrfs_warn(fs_info, "swapfile must not be checksummed");
10638 		return -EINVAL;
10639 	}
10640 
10641 	/*
10642 	 * Balance or device remove/replace/resize can move stuff around from
10643 	 * under us. The exclop protection makes sure they aren't running/won't
10644 	 * run concurrently while we are mapping the swap extents, and
10645 	 * fs_info->swapfile_pins prevents them from running while the swap
10646 	 * file is active and moving the extents. Note that this also prevents
10647 	 * a concurrent device add which isn't actually necessary, but it's not
10648 	 * really worth the trouble to allow it.
10649 	 */
10650 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
10651 		btrfs_warn(fs_info,
10652 	   "cannot activate swapfile while exclusive operation is running");
10653 		return -EBUSY;
10654 	}
10655 
10656 	/*
10657 	 * Prevent snapshot creation while we are activating the swap file.
10658 	 * We do not want to race with snapshot creation. If snapshot creation
10659 	 * already started before we bumped nr_swapfiles from 0 to 1 and
10660 	 * completes before the first write into the swap file after it is
10661 	 * activated, than that write would fallback to COW.
10662 	 */
10663 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
10664 		btrfs_exclop_finish(fs_info);
10665 		btrfs_warn(fs_info,
10666 	   "cannot activate swapfile because snapshot creation is in progress");
10667 		return -EINVAL;
10668 	}
10669 	/*
10670 	 * Snapshots can create extents which require COW even if NODATACOW is
10671 	 * set. We use this counter to prevent snapshots. We must increment it
10672 	 * before walking the extents because we don't want a concurrent
10673 	 * snapshot to run after we've already checked the extents.
10674 	 *
10675 	 * It is possible that subvolume is marked for deletion but still not
10676 	 * removed yet. To prevent this race, we check the root status before
10677 	 * activating the swapfile.
10678 	 */
10679 	spin_lock(&root->root_item_lock);
10680 	if (btrfs_root_dead(root)) {
10681 		spin_unlock(&root->root_item_lock);
10682 
10683 		btrfs_exclop_finish(fs_info);
10684 		btrfs_warn(fs_info,
10685 		"cannot activate swapfile because subvolume %llu is being deleted",
10686 			root->root_key.objectid);
10687 		return -EPERM;
10688 	}
10689 	atomic_inc(&root->nr_swapfiles);
10690 	spin_unlock(&root->root_item_lock);
10691 
10692 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10693 
10694 	lock_extent(io_tree, 0, isize - 1, &cached_state);
10695 	start = 0;
10696 	while (start < isize) {
10697 		u64 logical_block_start, physical_block_start;
10698 		struct btrfs_block_group *bg;
10699 		u64 len = isize - start;
10700 
10701 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
10702 		if (IS_ERR(em)) {
10703 			ret = PTR_ERR(em);
10704 			goto out;
10705 		}
10706 
10707 		if (em->block_start == EXTENT_MAP_HOLE) {
10708 			btrfs_warn(fs_info, "swapfile must not have holes");
10709 			ret = -EINVAL;
10710 			goto out;
10711 		}
10712 		if (em->block_start == EXTENT_MAP_INLINE) {
10713 			/*
10714 			 * It's unlikely we'll ever actually find ourselves
10715 			 * here, as a file small enough to fit inline won't be
10716 			 * big enough to store more than the swap header, but in
10717 			 * case something changes in the future, let's catch it
10718 			 * here rather than later.
10719 			 */
10720 			btrfs_warn(fs_info, "swapfile must not be inline");
10721 			ret = -EINVAL;
10722 			goto out;
10723 		}
10724 		if (extent_map_is_compressed(em)) {
10725 			btrfs_warn(fs_info, "swapfile must not be compressed");
10726 			ret = -EINVAL;
10727 			goto out;
10728 		}
10729 
10730 		logical_block_start = em->block_start + (start - em->start);
10731 		len = min(len, em->len - (start - em->start));
10732 		free_extent_map(em);
10733 		em = NULL;
10734 
10735 		ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, false, true);
10736 		if (ret < 0) {
10737 			goto out;
10738 		} else if (ret) {
10739 			ret = 0;
10740 		} else {
10741 			btrfs_warn(fs_info,
10742 				   "swapfile must not be copy-on-write");
10743 			ret = -EINVAL;
10744 			goto out;
10745 		}
10746 
10747 		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10748 		if (IS_ERR(map)) {
10749 			ret = PTR_ERR(map);
10750 			goto out;
10751 		}
10752 
10753 		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10754 			btrfs_warn(fs_info,
10755 				   "swapfile must have single data profile");
10756 			ret = -EINVAL;
10757 			goto out;
10758 		}
10759 
10760 		if (device == NULL) {
10761 			device = map->stripes[0].dev;
10762 			ret = btrfs_add_swapfile_pin(inode, device, false);
10763 			if (ret == 1)
10764 				ret = 0;
10765 			else if (ret)
10766 				goto out;
10767 		} else if (device != map->stripes[0].dev) {
10768 			btrfs_warn(fs_info, "swapfile must be on one device");
10769 			ret = -EINVAL;
10770 			goto out;
10771 		}
10772 
10773 		physical_block_start = (map->stripes[0].physical +
10774 					(logical_block_start - map->start));
10775 		len = min(len, map->chunk_len - (logical_block_start - map->start));
10776 		btrfs_free_chunk_map(map);
10777 		map = NULL;
10778 
10779 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10780 		if (!bg) {
10781 			btrfs_warn(fs_info,
10782 			   "could not find block group containing swapfile");
10783 			ret = -EINVAL;
10784 			goto out;
10785 		}
10786 
10787 		if (!btrfs_inc_block_group_swap_extents(bg)) {
10788 			btrfs_warn(fs_info,
10789 			   "block group for swapfile at %llu is read-only%s",
10790 			   bg->start,
10791 			   atomic_read(&fs_info->scrubs_running) ?
10792 				       " (scrub running)" : "");
10793 			btrfs_put_block_group(bg);
10794 			ret = -EINVAL;
10795 			goto out;
10796 		}
10797 
10798 		ret = btrfs_add_swapfile_pin(inode, bg, true);
10799 		if (ret) {
10800 			btrfs_put_block_group(bg);
10801 			if (ret == 1)
10802 				ret = 0;
10803 			else
10804 				goto out;
10805 		}
10806 
10807 		if (bsi.block_len &&
10808 		    bsi.block_start + bsi.block_len == physical_block_start) {
10809 			bsi.block_len += len;
10810 		} else {
10811 			if (bsi.block_len) {
10812 				ret = btrfs_add_swap_extent(sis, &bsi);
10813 				if (ret)
10814 					goto out;
10815 			}
10816 			bsi.start = start;
10817 			bsi.block_start = physical_block_start;
10818 			bsi.block_len = len;
10819 		}
10820 
10821 		start += len;
10822 	}
10823 
10824 	if (bsi.block_len)
10825 		ret = btrfs_add_swap_extent(sis, &bsi);
10826 
10827 out:
10828 	if (!IS_ERR_OR_NULL(em))
10829 		free_extent_map(em);
10830 	if (!IS_ERR_OR_NULL(map))
10831 		btrfs_free_chunk_map(map);
10832 
10833 	unlock_extent(io_tree, 0, isize - 1, &cached_state);
10834 
10835 	if (ret)
10836 		btrfs_swap_deactivate(file);
10837 
10838 	btrfs_drew_write_unlock(&root->snapshot_lock);
10839 
10840 	btrfs_exclop_finish(fs_info);
10841 
10842 	if (ret)
10843 		return ret;
10844 
10845 	if (device)
10846 		sis->bdev = device->bdev;
10847 	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
10848 	sis->max = bsi.nr_pages;
10849 	sis->pages = bsi.nr_pages - 1;
10850 	sis->highest_bit = bsi.nr_pages - 1;
10851 	return bsi.nr_extents;
10852 }
10853 #else
10854 static void btrfs_swap_deactivate(struct file *file)
10855 {
10856 }
10857 
10858 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10859 			       sector_t *span)
10860 {
10861 	return -EOPNOTSUPP;
10862 }
10863 #endif
10864 
10865 /*
10866  * Update the number of bytes used in the VFS' inode. When we replace extents in
10867  * a range (clone, dedupe, fallocate's zero range), we must update the number of
10868  * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
10869  * always get a correct value.
10870  */
10871 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
10872 			      const u64 add_bytes,
10873 			      const u64 del_bytes)
10874 {
10875 	if (add_bytes == del_bytes)
10876 		return;
10877 
10878 	spin_lock(&inode->lock);
10879 	if (del_bytes > 0)
10880 		inode_sub_bytes(&inode->vfs_inode, del_bytes);
10881 	if (add_bytes > 0)
10882 		inode_add_bytes(&inode->vfs_inode, add_bytes);
10883 	spin_unlock(&inode->lock);
10884 }
10885 
10886 /*
10887  * Verify that there are no ordered extents for a given file range.
10888  *
10889  * @inode:   The target inode.
10890  * @start:   Start offset of the file range, should be sector size aligned.
10891  * @end:     End offset (inclusive) of the file range, its value +1 should be
10892  *           sector size aligned.
10893  *
10894  * This should typically be used for cases where we locked an inode's VFS lock in
10895  * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
10896  * we have flushed all delalloc in the range, we have waited for all ordered
10897  * extents in the range to complete and finally we have locked the file range in
10898  * the inode's io_tree.
10899  */
10900 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
10901 {
10902 	struct btrfs_root *root = inode->root;
10903 	struct btrfs_ordered_extent *ordered;
10904 
10905 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
10906 		return;
10907 
10908 	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
10909 	if (ordered) {
10910 		btrfs_err(root->fs_info,
10911 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
10912 			  start, end, btrfs_ino(inode), root->root_key.objectid,
10913 			  ordered->file_offset,
10914 			  ordered->file_offset + ordered->num_bytes - 1);
10915 		btrfs_put_ordered_extent(ordered);
10916 	}
10917 
10918 	ASSERT(ordered == NULL);
10919 }
10920 
10921 static const struct inode_operations btrfs_dir_inode_operations = {
10922 	.getattr	= btrfs_getattr,
10923 	.lookup		= btrfs_lookup,
10924 	.create		= btrfs_create,
10925 	.unlink		= btrfs_unlink,
10926 	.link		= btrfs_link,
10927 	.mkdir		= btrfs_mkdir,
10928 	.rmdir		= btrfs_rmdir,
10929 	.rename		= btrfs_rename2,
10930 	.symlink	= btrfs_symlink,
10931 	.setattr	= btrfs_setattr,
10932 	.mknod		= btrfs_mknod,
10933 	.listxattr	= btrfs_listxattr,
10934 	.permission	= btrfs_permission,
10935 	.get_inode_acl	= btrfs_get_acl,
10936 	.set_acl	= btrfs_set_acl,
10937 	.update_time	= btrfs_update_time,
10938 	.tmpfile        = btrfs_tmpfile,
10939 	.fileattr_get	= btrfs_fileattr_get,
10940 	.fileattr_set	= btrfs_fileattr_set,
10941 };
10942 
10943 static const struct file_operations btrfs_dir_file_operations = {
10944 	.llseek		= btrfs_dir_llseek,
10945 	.read		= generic_read_dir,
10946 	.iterate_shared	= btrfs_real_readdir,
10947 	.open		= btrfs_opendir,
10948 	.unlocked_ioctl	= btrfs_ioctl,
10949 #ifdef CONFIG_COMPAT
10950 	.compat_ioctl	= btrfs_compat_ioctl,
10951 #endif
10952 	.release        = btrfs_release_file,
10953 	.fsync		= btrfs_sync_file,
10954 };
10955 
10956 /*
10957  * btrfs doesn't support the bmap operation because swapfiles
10958  * use bmap to make a mapping of extents in the file.  They assume
10959  * these extents won't change over the life of the file and they
10960  * use the bmap result to do IO directly to the drive.
10961  *
10962  * the btrfs bmap call would return logical addresses that aren't
10963  * suitable for IO and they also will change frequently as COW
10964  * operations happen.  So, swapfile + btrfs == corruption.
10965  *
10966  * For now we're avoiding this by dropping bmap.
10967  */
10968 static const struct address_space_operations btrfs_aops = {
10969 	.read_folio	= btrfs_read_folio,
10970 	.writepages	= btrfs_writepages,
10971 	.readahead	= btrfs_readahead,
10972 	.invalidate_folio = btrfs_invalidate_folio,
10973 	.release_folio	= btrfs_release_folio,
10974 	.migrate_folio	= btrfs_migrate_folio,
10975 	.dirty_folio	= filemap_dirty_folio,
10976 	.error_remove_folio = generic_error_remove_folio,
10977 	.swap_activate	= btrfs_swap_activate,
10978 	.swap_deactivate = btrfs_swap_deactivate,
10979 };
10980 
10981 static const struct inode_operations btrfs_file_inode_operations = {
10982 	.getattr	= btrfs_getattr,
10983 	.setattr	= btrfs_setattr,
10984 	.listxattr      = btrfs_listxattr,
10985 	.permission	= btrfs_permission,
10986 	.fiemap		= btrfs_fiemap,
10987 	.get_inode_acl	= btrfs_get_acl,
10988 	.set_acl	= btrfs_set_acl,
10989 	.update_time	= btrfs_update_time,
10990 	.fileattr_get	= btrfs_fileattr_get,
10991 	.fileattr_set	= btrfs_fileattr_set,
10992 };
10993 static const struct inode_operations btrfs_special_inode_operations = {
10994 	.getattr	= btrfs_getattr,
10995 	.setattr	= btrfs_setattr,
10996 	.permission	= btrfs_permission,
10997 	.listxattr	= btrfs_listxattr,
10998 	.get_inode_acl	= btrfs_get_acl,
10999 	.set_acl	= btrfs_set_acl,
11000 	.update_time	= btrfs_update_time,
11001 };
11002 static const struct inode_operations btrfs_symlink_inode_operations = {
11003 	.get_link	= page_get_link,
11004 	.getattr	= btrfs_getattr,
11005 	.setattr	= btrfs_setattr,
11006 	.permission	= btrfs_permission,
11007 	.listxattr	= btrfs_listxattr,
11008 	.update_time	= btrfs_update_time,
11009 };
11010 
11011 const struct dentry_operations btrfs_dentry_operations = {
11012 	.d_delete	= btrfs_dentry_delete,
11013 };
11014