xref: /linux/fs/btrfs/inode.c (revision 8991448e56cb2118b561eeda193af53b4ff6b632)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/blk-cgroup.h>
9 #include <linux/file.h>
10 #include <linux/filelock.h>
11 #include <linux/fs.h>
12 #include <linux/fs_struct.h>
13 #include <linux/pagemap.h>
14 #include <linux/highmem.h>
15 #include <linux/time.h>
16 #include <linux/init.h>
17 #include <linux/string.h>
18 #include <linux/backing-dev.h>
19 #include <linux/writeback.h>
20 #include <linux/compat.h>
21 #include <linux/xattr.h>
22 #include <linux/posix_acl.h>
23 #include <linux/falloc.h>
24 #include <linux/slab.h>
25 #include <linux/ratelimit.h>
26 #include <linux/btrfs.h>
27 #include <linux/blkdev.h>
28 #include <linux/posix_acl_xattr.h>
29 #include <linux/uio.h>
30 #include <linux/magic.h>
31 #include <linux/iversion.h>
32 #include <linux/swap.h>
33 #include <linux/migrate.h>
34 #include <linux/sched/mm.h>
35 #include <linux/iomap.h>
36 #include <linux/unaligned.h>
37 #include "misc.h"
38 #include "ctree.h"
39 #include "disk-io.h"
40 #include "transaction.h"
41 #include "btrfs_inode.h"
42 #include "ordered-data.h"
43 #include "xattr.h"
44 #include "tree-log.h"
45 #include "bio.h"
46 #include "compression.h"
47 #include "locking.h"
48 #include "props.h"
49 #include "qgroup.h"
50 #include "delalloc-space.h"
51 #include "block-group.h"
52 #include "space-info.h"
53 #include "zoned.h"
54 #include "subpage.h"
55 #include "inode-item.h"
56 #include "fs.h"
57 #include "accessors.h"
58 #include "extent-tree.h"
59 #include "root-tree.h"
60 #include "defrag.h"
61 #include "dir-item.h"
62 #include "file-item.h"
63 #include "uuid-tree.h"
64 #include "ioctl.h"
65 #include "file.h"
66 #include "acl.h"
67 #include "relocation.h"
68 #include "verity.h"
69 #include "super.h"
70 #include "orphan.h"
71 #include "backref.h"
72 #include "raid-stripe-tree.h"
73 #include "fiemap.h"
74 #include "delayed-inode.h"
75 
76 #define COW_FILE_RANGE_KEEP_LOCKED	(1UL << 0)
77 #define COW_FILE_RANGE_NO_INLINE	(1UL << 1)
78 
79 struct btrfs_iget_args {
80 	u64 ino;
81 	struct btrfs_root *root;
82 };
83 
84 struct btrfs_rename_ctx {
85 	/* Output field. Stores the index number of the old directory entry. */
86 	u64 index;
87 };
88 
89 /*
90  * Used by data_reloc_print_warning_inode() to pass needed info for filename
91  * resolution and output of error message.
92  */
93 struct data_reloc_warn {
94 	struct btrfs_path path;
95 	struct btrfs_fs_info *fs_info;
96 	u64 extent_item_size;
97 	u64 logical;
98 	int mirror_num;
99 };
100 
101 /*
102  * For the file_extent_tree, we want to hold the inode lock when we lookup and
103  * update the disk_i_size, but lockdep will complain because our io_tree we hold
104  * the tree lock and get the inode lock when setting delalloc. These two things
105  * are unrelated, so make a class for the file_extent_tree so we don't get the
106  * two locking patterns mixed up.
107  */
108 static struct lock_class_key file_extent_tree_class;
109 
110 static const struct inode_operations btrfs_dir_inode_operations;
111 static const struct inode_operations btrfs_symlink_inode_operations;
112 static const struct inode_operations btrfs_special_inode_operations;
113 static const struct inode_operations btrfs_file_inode_operations;
114 static const struct address_space_operations btrfs_aops;
115 static const struct file_operations btrfs_dir_file_operations;
116 
117 static struct kmem_cache *btrfs_inode_cachep;
118 
119 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
120 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
121 
122 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
123 				     struct folio *locked_folio, u64 start,
124 				     u64 end, struct writeback_control *wbc,
125 				     bool pages_dirty);
126 
data_reloc_print_warning_inode(u64 inum,u64 offset,u64 num_bytes,u64 root,void * warn_ctx)127 static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
128 					  u64 root, void *warn_ctx)
129 {
130 	struct data_reloc_warn *warn = warn_ctx;
131 	struct btrfs_fs_info *fs_info = warn->fs_info;
132 	struct extent_buffer *eb;
133 	struct btrfs_inode_item *inode_item;
134 	struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
135 	struct btrfs_root *local_root;
136 	struct btrfs_key key;
137 	unsigned int nofs_flag;
138 	u32 nlink;
139 	int ret;
140 
141 	local_root = btrfs_get_fs_root(fs_info, root, true);
142 	if (IS_ERR(local_root)) {
143 		ret = PTR_ERR(local_root);
144 		goto err;
145 	}
146 
147 	/* This makes the path point to (inum INODE_ITEM ioff). */
148 	key.objectid = inum;
149 	key.type = BTRFS_INODE_ITEM_KEY;
150 	key.offset = 0;
151 
152 	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
153 	if (ret) {
154 		btrfs_put_root(local_root);
155 		btrfs_release_path(&warn->path);
156 		goto err;
157 	}
158 
159 	eb = warn->path.nodes[0];
160 	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
161 	nlink = btrfs_inode_nlink(eb, inode_item);
162 	btrfs_release_path(&warn->path);
163 
164 	nofs_flag = memalloc_nofs_save();
165 	ipath = init_ipath(4096, local_root, &warn->path);
166 	memalloc_nofs_restore(nofs_flag);
167 	if (IS_ERR(ipath)) {
168 		btrfs_put_root(local_root);
169 		ret = PTR_ERR(ipath);
170 		ipath = NULL;
171 		/*
172 		 * -ENOMEM, not a critical error, just output an generic error
173 		 * without filename.
174 		 */
175 		btrfs_warn(fs_info,
176 "checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
177 			   warn->logical, warn->mirror_num, root, inum, offset);
178 		return ret;
179 	}
180 	ret = paths_from_inode(inum, ipath);
181 	if (ret < 0) {
182 		btrfs_put_root(local_root);
183 		goto err;
184 	}
185 
186 	/*
187 	 * We deliberately ignore the bit ipath might have been too small to
188 	 * hold all of the paths here
189 	 */
190 	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
191 		btrfs_warn(fs_info,
192 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
193 			   warn->logical, warn->mirror_num, root, inum, offset,
194 			   fs_info->sectorsize, nlink,
195 			   (char *)(unsigned long)ipath->fspath->val[i]);
196 	}
197 
198 	btrfs_put_root(local_root);
199 	return 0;
200 
201 err:
202 	btrfs_warn(fs_info,
203 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
204 		   warn->logical, warn->mirror_num, root, inum, offset, ret);
205 
206 	return ret;
207 }
208 
209 /*
210  * Do extra user-friendly error output (e.g. lookup all the affected files).
211  *
212  * Return true if we succeeded doing the backref lookup.
213  * Return false if such lookup failed, and has to fallback to the old error message.
214  */
print_data_reloc_error(const struct btrfs_inode * inode,u64 file_off,const u8 * csum,const u8 * csum_expected,int mirror_num)215 static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
216 				   const u8 *csum, const u8 *csum_expected,
217 				   int mirror_num)
218 {
219 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
220 	BTRFS_PATH_AUTO_RELEASE(path);
221 	struct btrfs_key found_key = { 0 };
222 	struct extent_buffer *eb;
223 	struct btrfs_extent_item *ei;
224 	const u32 csum_size = fs_info->csum_size;
225 	u64 logical;
226 	u64 flags;
227 	u32 item_size;
228 	int ret;
229 
230 	mutex_lock(&fs_info->reloc_mutex);
231 	logical = btrfs_get_reloc_bg_bytenr(fs_info);
232 	mutex_unlock(&fs_info->reloc_mutex);
233 
234 	if (logical == U64_MAX) {
235 		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
236 		btrfs_warn_rl(fs_info,
237 "csum failed root %lld ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
238 			btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
239 			BTRFS_CSUM_FMT_VALUE(csum_size, csum),
240 			BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
241 			mirror_num);
242 		return;
243 	}
244 
245 	logical += file_off;
246 	btrfs_warn_rl(fs_info,
247 "csum failed root %lld ino %llu off %llu logical %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
248 			btrfs_root_id(inode->root),
249 			btrfs_ino(inode), file_off, logical,
250 			BTRFS_CSUM_FMT_VALUE(csum_size, csum),
251 			BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
252 			mirror_num);
253 
254 	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
255 	if (ret < 0) {
256 		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
257 			     logical, ret);
258 		return;
259 	}
260 	eb = path.nodes[0];
261 	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
262 	item_size = btrfs_item_size(eb, path.slots[0]);
263 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
264 		unsigned long ptr = 0;
265 		u64 ref_root;
266 		u8 ref_level;
267 
268 		while (true) {
269 			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
270 						      item_size, &ref_root,
271 						      &ref_level);
272 			if (ret < 0) {
273 				btrfs_warn_rl(fs_info,
274 				"failed to resolve tree backref for logical %llu: %d",
275 					      logical, ret);
276 				break;
277 			}
278 			if (ret > 0)
279 				break;
280 
281 			btrfs_warn_rl(fs_info,
282 "csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
283 				logical, mirror_num,
284 				(ref_level ? "node" : "leaf"),
285 				ref_level, ref_root);
286 		}
287 	} else {
288 		struct btrfs_backref_walk_ctx ctx = { 0 };
289 		struct data_reloc_warn reloc_warn = { 0 };
290 
291 		/*
292 		 * Do not hold the path as later iterate_extent_inodes() call
293 		 * can be time consuming.
294 		 */
295 		btrfs_release_path(&path);
296 
297 		ctx.bytenr = found_key.objectid;
298 		ctx.extent_item_pos = logical - found_key.objectid;
299 		ctx.fs_info = fs_info;
300 
301 		reloc_warn.logical = logical;
302 		reloc_warn.extent_item_size = found_key.offset;
303 		reloc_warn.mirror_num = mirror_num;
304 		reloc_warn.fs_info = fs_info;
305 
306 		iterate_extent_inodes(&ctx, true,
307 				      data_reloc_print_warning_inode, &reloc_warn);
308 	}
309 }
310 
btrfs_print_data_csum_error(struct btrfs_inode * inode,u64 logical_start,u8 * csum,u8 * csum_expected,int mirror_num)311 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
312 		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
313 {
314 	struct btrfs_root *root = inode->root;
315 	const u32 csum_size = root->fs_info->csum_size;
316 
317 	/* For data reloc tree, it's better to do a backref lookup instead. */
318 	if (btrfs_is_data_reloc_root(root))
319 		return print_data_reloc_error(inode, logical_start, csum,
320 					      csum_expected, mirror_num);
321 
322 	/* Output without objectid, which is more meaningful */
323 	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
324 		btrfs_warn_rl(root->fs_info,
325 "csum failed root %lld ino %lld off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
326 			btrfs_root_id(root), btrfs_ino(inode),
327 			logical_start,
328 			BTRFS_CSUM_FMT_VALUE(csum_size, csum),
329 			BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
330 			mirror_num);
331 	} else {
332 		btrfs_warn_rl(root->fs_info,
333 "csum failed root %llu ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
334 			btrfs_root_id(root), btrfs_ino(inode),
335 			logical_start,
336 			BTRFS_CSUM_FMT_VALUE(csum_size, csum),
337 			BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
338 			mirror_num);
339 	}
340 }
341 
342 /*
343  * Lock inode i_rwsem based on arguments passed.
344  *
345  * ilock_flags can have the following bit set:
346  *
347  * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
348  * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
349  *		     return -EAGAIN
350  * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
351  */
btrfs_inode_lock(struct btrfs_inode * inode,unsigned int ilock_flags)352 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
353 {
354 	if (ilock_flags & BTRFS_ILOCK_SHARED) {
355 		if (ilock_flags & BTRFS_ILOCK_TRY) {
356 			if (!inode_trylock_shared(&inode->vfs_inode))
357 				return -EAGAIN;
358 			else
359 				return 0;
360 		}
361 		inode_lock_shared(&inode->vfs_inode);
362 	} else {
363 		if (ilock_flags & BTRFS_ILOCK_TRY) {
364 			if (!inode_trylock(&inode->vfs_inode))
365 				return -EAGAIN;
366 			else
367 				return 0;
368 		}
369 		inode_lock(&inode->vfs_inode);
370 	}
371 	if (ilock_flags & BTRFS_ILOCK_MMAP)
372 		down_write(&inode->i_mmap_lock);
373 	return 0;
374 }
375 
376 /*
377  * Unlock inode i_rwsem.
378  *
379  * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
380  * to decide whether the lock acquired is shared or exclusive.
381  */
btrfs_inode_unlock(struct btrfs_inode * inode,unsigned int ilock_flags)382 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
383 {
384 	if (ilock_flags & BTRFS_ILOCK_MMAP)
385 		up_write(&inode->i_mmap_lock);
386 	if (ilock_flags & BTRFS_ILOCK_SHARED)
387 		inode_unlock_shared(&inode->vfs_inode);
388 	else
389 		inode_unlock(&inode->vfs_inode);
390 }
391 
392 /*
393  * Cleanup all submitted ordered extents in specified range to handle errors
394  * from the btrfs_run_delalloc_range() callback.
395  *
396  * NOTE: caller must ensure that when an error happens, it can not call
397  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
398  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
399  * to be released, which we want to happen only when finishing the ordered
400  * extent (btrfs_finish_ordered_io()).
401  */
btrfs_cleanup_ordered_extents(struct btrfs_inode * inode,u64 offset,u64 bytes)402 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
403 						 u64 offset, u64 bytes)
404 {
405 	pgoff_t index = offset >> PAGE_SHIFT;
406 	const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT;
407 	struct folio *folio;
408 
409 	while (index <= end_index) {
410 		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
411 		if (IS_ERR(folio)) {
412 			index++;
413 			continue;
414 		}
415 
416 		index = folio_next_index(folio);
417 		/*
418 		 * Here we just clear all Ordered bits for every page in the
419 		 * range, then btrfs_mark_ordered_io_finished() will handle
420 		 * the ordered extent accounting for the range.
421 		 */
422 		btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
423 						offset, bytes);
424 		folio_put(folio);
425 	}
426 
427 	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
428 }
429 
430 static int btrfs_dirty_inode(struct btrfs_inode *inode);
431 
btrfs_init_inode_security(struct btrfs_trans_handle * trans,struct btrfs_new_inode_args * args)432 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
433 				     struct btrfs_new_inode_args *args)
434 {
435 	int ret;
436 
437 	if (args->default_acl) {
438 		ret = __btrfs_set_acl(trans, args->inode, args->default_acl,
439 				      ACL_TYPE_DEFAULT);
440 		if (ret)
441 			return ret;
442 	}
443 	if (args->acl) {
444 		ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
445 		if (ret)
446 			return ret;
447 	}
448 	if (!args->default_acl && !args->acl)
449 		cache_no_acl(args->inode);
450 	return btrfs_xattr_security_init(trans, args->inode, args->dir,
451 					 &args->dentry->d_name);
452 }
453 
454 /*
455  * this does all the hard work for inserting an inline extent into
456  * the btree.  The caller should have done a btrfs_drop_extents so that
457  * no overlapping inline items exist in the btree
458  */
insert_inline_extent(struct btrfs_trans_handle * trans,struct btrfs_path * path,struct btrfs_inode * inode,bool extent_inserted,size_t size,size_t compressed_size,int compress_type,struct folio * compressed_folio,bool update_i_size)459 static int insert_inline_extent(struct btrfs_trans_handle *trans,
460 				struct btrfs_path *path,
461 				struct btrfs_inode *inode, bool extent_inserted,
462 				size_t size, size_t compressed_size,
463 				int compress_type,
464 				struct folio *compressed_folio,
465 				bool update_i_size)
466 {
467 	struct btrfs_root *root = inode->root;
468 	struct extent_buffer *leaf;
469 	const u32 sectorsize = trans->fs_info->sectorsize;
470 	char *kaddr;
471 	unsigned long ptr;
472 	struct btrfs_file_extent_item *ei;
473 	int ret;
474 	size_t cur_size = size;
475 	u64 i_size;
476 
477 	/*
478 	 * The decompressed size must still be no larger than a sector.  Under
479 	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
480 	 * big deal and we can continue the insertion.
481 	 */
482 	ASSERT(size <= sectorsize);
483 
484 	/*
485 	 * The compressed size also needs to be no larger than a page.
486 	 * That's also why we only need one folio as the parameter.
487 	 */
488 	if (compressed_folio) {
489 		ASSERT(compressed_size <= sectorsize);
490 		ASSERT(compressed_size <= PAGE_SIZE);
491 	} else {
492 		ASSERT(compressed_size == 0);
493 	}
494 
495 	if (compressed_size && compressed_folio)
496 		cur_size = compressed_size;
497 
498 	if (!extent_inserted) {
499 		struct btrfs_key key;
500 		size_t datasize;
501 
502 		key.objectid = btrfs_ino(inode);
503 		key.type = BTRFS_EXTENT_DATA_KEY;
504 		key.offset = 0;
505 
506 		datasize = btrfs_file_extent_calc_inline_size(cur_size);
507 		ret = btrfs_insert_empty_item(trans, root, path, &key,
508 					      datasize);
509 		if (ret)
510 			return ret;
511 	}
512 	leaf = path->nodes[0];
513 	ei = btrfs_item_ptr(leaf, path->slots[0],
514 			    struct btrfs_file_extent_item);
515 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
516 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
517 	btrfs_set_file_extent_encryption(leaf, ei, 0);
518 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
519 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
520 	ptr = btrfs_file_extent_inline_start(ei);
521 
522 	if (compress_type != BTRFS_COMPRESS_NONE) {
523 		kaddr = kmap_local_folio(compressed_folio, 0);
524 		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
525 		kunmap_local(kaddr);
526 
527 		btrfs_set_file_extent_compression(leaf, ei,
528 						  compress_type);
529 	} else {
530 		struct folio *folio;
531 
532 		folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
533 		ASSERT(!IS_ERR(folio));
534 		btrfs_set_file_extent_compression(leaf, ei, 0);
535 		kaddr = kmap_local_folio(folio, 0);
536 		write_extent_buffer(leaf, kaddr, ptr, size);
537 		kunmap_local(kaddr);
538 		folio_put(folio);
539 	}
540 	btrfs_release_path(path);
541 
542 	/*
543 	 * We align size to sectorsize for inline extents just for simplicity
544 	 * sake.
545 	 */
546 	ret = btrfs_inode_set_file_extent_range(inode, 0,
547 					ALIGN(size, root->fs_info->sectorsize));
548 	if (ret)
549 		return ret;
550 
551 	/*
552 	 * We're an inline extent, so nobody can extend the file past i_size
553 	 * without locking a page we already have locked.
554 	 *
555 	 * We must do any i_size and inode updates before we unlock the pages.
556 	 * Otherwise we could end up racing with unlink.
557 	 */
558 	i_size = i_size_read(&inode->vfs_inode);
559 	if (update_i_size && size > i_size) {
560 		i_size_write(&inode->vfs_inode, size);
561 		i_size = size;
562 	}
563 	inode->disk_i_size = i_size;
564 
565 	return 0;
566 }
567 
can_cow_file_range_inline(struct btrfs_inode * inode,u64 offset,u64 size,size_t compressed_size)568 static bool can_cow_file_range_inline(struct btrfs_inode *inode,
569 				      u64 offset, u64 size,
570 				      size_t compressed_size)
571 {
572 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
573 	u64 data_len = (compressed_size ?: size);
574 
575 	/* Inline extents must start at offset 0. */
576 	if (offset != 0)
577 		return false;
578 
579 	/*
580 	 * Even for bs > ps cases, cow_file_range_inline() can only accept a
581 	 * single folio.
582 	 *
583 	 * This can be problematic and cause access beyond page boundary if a
584 	 * page sized folio is passed into that function.
585 	 * And encoded write is doing exactly that.
586 	 * So here limits the inlined extent size to PAGE_SIZE.
587 	 */
588 	if (size > PAGE_SIZE || compressed_size > PAGE_SIZE)
589 		return false;
590 
591 	/* Inline extents are limited to sectorsize. */
592 	if (size > fs_info->sectorsize)
593 		return false;
594 
595 	/* We do not allow a non-compressed extent to be as large as block size. */
596 	if (data_len >= fs_info->sectorsize)
597 		return false;
598 
599 	/* We cannot exceed the maximum inline data size. */
600 	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
601 		return false;
602 
603 	/* We cannot exceed the user specified max_inline size. */
604 	if (data_len > fs_info->max_inline)
605 		return false;
606 
607 	/* Inline extents must be the entirety of the file. */
608 	if (size < i_size_read(&inode->vfs_inode))
609 		return false;
610 
611 	/* Encrypted file cannot be inlined. */
612 	if (IS_ENCRYPTED(&inode->vfs_inode))
613 		return false;
614 
615 	return true;
616 }
617 
618 /*
619  * conditionally insert an inline extent into the file.  This
620  * does the checks required to make sure the data is small enough
621  * to fit as an inline extent.
622  *
623  * If being used directly, you must have already checked we're allowed to cow
624  * the range by getting true from can_cow_file_range_inline().
625  */
__cow_file_range_inline(struct btrfs_inode * inode,u64 size,size_t compressed_size,int compress_type,struct folio * compressed_folio,bool update_i_size)626 static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
627 					    u64 size, size_t compressed_size,
628 					    int compress_type,
629 					    struct folio *compressed_folio,
630 					    bool update_i_size)
631 {
632 	struct btrfs_drop_extents_args drop_args = { 0 };
633 	struct btrfs_root *root = inode->root;
634 	struct btrfs_fs_info *fs_info = root->fs_info;
635 	struct btrfs_trans_handle *trans = NULL;
636 	u64 data_len = (compressed_size ?: size);
637 	int ret;
638 	struct btrfs_path *path;
639 
640 	path = btrfs_alloc_path();
641 	if (!path) {
642 		ret = -ENOMEM;
643 		goto out;
644 	}
645 
646 	trans = btrfs_join_transaction(root);
647 	if (IS_ERR(trans)) {
648 		ret = PTR_ERR(trans);
649 		trans = NULL;
650 		goto out;
651 	}
652 	trans->block_rsv = &inode->block_rsv;
653 
654 	drop_args.path = path;
655 	drop_args.start = 0;
656 	drop_args.end = fs_info->sectorsize;
657 	drop_args.drop_cache = true;
658 	drop_args.replace_extent = true;
659 	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
660 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
661 	if (unlikely(ret)) {
662 		btrfs_abort_transaction(trans, ret);
663 		goto out;
664 	}
665 
666 	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
667 				   size, compressed_size, compress_type,
668 				   compressed_folio, update_i_size);
669 	if (unlikely(ret && ret != -ENOSPC)) {
670 		btrfs_abort_transaction(trans, ret);
671 		goto out;
672 	} else if (ret == -ENOSPC) {
673 		ret = 1;
674 		goto out;
675 	}
676 
677 	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
678 	ret = btrfs_update_inode(trans, inode);
679 	if (unlikely(ret && ret != -ENOSPC)) {
680 		btrfs_abort_transaction(trans, ret);
681 		goto out;
682 	} else if (ret == -ENOSPC) {
683 		ret = 1;
684 		goto out;
685 	}
686 
687 	btrfs_set_inode_full_sync(inode);
688 out:
689 	/*
690 	 * Don't forget to free the reserved space, as for inlined extent
691 	 * it won't count as data extent, free them directly here.
692 	 * And at reserve time, it's always aligned to sector size, so
693 	 * just free one sector here.
694 	 *
695 	 * If we fallback to non-inline (ret == 1) due to -ENOSPC, then we need
696 	 * to keep the data reservation.
697 	 */
698 	if (ret <= 0)
699 		btrfs_qgroup_free_data(inode, NULL, 0, fs_info->sectorsize, NULL);
700 	btrfs_free_path(path);
701 	if (trans)
702 		btrfs_end_transaction(trans);
703 	return ret;
704 }
705 
cow_file_range_inline(struct btrfs_inode * inode,struct folio * locked_folio,u64 offset,u64 end,size_t compressed_size,int compress_type,struct folio * compressed_folio,bool update_i_size)706 static noinline int cow_file_range_inline(struct btrfs_inode *inode,
707 					  struct folio *locked_folio,
708 					  u64 offset, u64 end,
709 					  size_t compressed_size,
710 					  int compress_type,
711 					  struct folio *compressed_folio,
712 					  bool update_i_size)
713 {
714 	struct extent_state *cached = NULL;
715 	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
716 		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
717 	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
718 	int ret;
719 
720 	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
721 		return 1;
722 
723 	btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
724 	ret = __cow_file_range_inline(inode, size, compressed_size,
725 				      compress_type, compressed_folio,
726 				      update_i_size);
727 	if (ret > 0) {
728 		btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
729 		return ret;
730 	}
731 
732 	/*
733 	 * In the successful case (ret == 0 here), cow_file_range will return 1.
734 	 *
735 	 * Quite a bit further up the callstack in extent_writepage(), ret == 1
736 	 * is treated as a short circuited success and does not unlock the folio,
737 	 * so we must do it here.
738 	 *
739 	 * In the failure case, the locked_folio does get unlocked by
740 	 * btrfs_folio_end_all_writers, which asserts that it is still locked
741 	 * at that point, so we must *not* unlock it here.
742 	 *
743 	 * The other two callsites in compress_file_range do not have a
744 	 * locked_folio, so they are not relevant to this logic.
745 	 */
746 	if (ret == 0)
747 		locked_folio = NULL;
748 
749 	extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
750 				     clear_flags, PAGE_UNLOCK |
751 				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
752 	return ret;
753 }
754 
755 struct async_extent {
756 	u64 start;
757 	u64 ram_size;
758 	struct compressed_bio *cb;
759 	struct list_head list;
760 };
761 
762 struct async_chunk {
763 	struct btrfs_inode *inode;
764 	struct folio *locked_folio;
765 	u64 start;
766 	u64 end;
767 	blk_opf_t write_flags;
768 	struct list_head extents;
769 	struct cgroup_subsys_state *blkcg_css;
770 	struct btrfs_work work;
771 	struct async_cow *async_cow;
772 };
773 
774 struct async_cow {
775 	atomic_t num_chunks;
776 	struct async_chunk chunks[];
777 };
778 
add_async_extent(struct async_chunk * cow,u64 start,u64 ram_size,struct compressed_bio * cb)779 static int add_async_extent(struct async_chunk *cow, u64 start, u64 ram_size,
780 			    struct compressed_bio *cb)
781 {
782 	struct async_extent *async_extent;
783 
784 	async_extent = kmalloc_obj(*async_extent, GFP_NOFS);
785 	if (!async_extent)
786 		return -ENOMEM;
787 	ASSERT(ram_size < U32_MAX);
788 	async_extent->start = start;
789 	async_extent->ram_size = ram_size;
790 	async_extent->cb = cb;
791 	list_add_tail(&async_extent->list, &cow->extents);
792 	return 0;
793 }
794 
795 /*
796  * Check if the inode needs to be submitted to compression, based on mount
797  * options, defragmentation, properties or heuristics.
798  */
inode_need_compress(struct btrfs_inode * inode,u64 start,u64 end)799 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
800 				      u64 end)
801 {
802 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
803 
804 	if (!btrfs_inode_can_compress(inode)) {
805 		DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode));
806 		return 0;
807 	}
808 
809 	/*
810 	 * If the delalloc range is only one fs block and can not be inlined,
811 	 * do not even bother try compression, as there will be no space saving
812 	 * and will always fallback to regular write later.
813 	 */
814 	if (start != 0 && end + 1 - start <= fs_info->sectorsize)
815 		return 0;
816 	/* Defrag ioctl takes precedence over mount options and properties. */
817 	if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
818 		return 0;
819 	if (BTRFS_COMPRESS_NONE < inode->defrag_compress &&
820 	    inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES)
821 		return 1;
822 	/* force compress */
823 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
824 		return 1;
825 	/* bad compression ratios */
826 	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
827 		return 0;
828 	if (btrfs_test_opt(fs_info, COMPRESS) ||
829 	    inode->flags & BTRFS_INODE_COMPRESS ||
830 	    inode->prop_compress)
831 		return btrfs_compress_heuristic(inode, start, end);
832 	return 0;
833 }
834 
inode_should_defrag(struct btrfs_inode * inode,u64 start,u64 end,u64 num_bytes,u32 small_write)835 static inline void inode_should_defrag(struct btrfs_inode *inode,
836 		u64 start, u64 end, u64 num_bytes, u32 small_write)
837 {
838 	/* If this is a small write inside eof, kick off a defrag */
839 	if (num_bytes < small_write &&
840 	    (start > 0 || end + 1 < inode->disk_i_size))
841 		btrfs_add_inode_defrag(inode, small_write);
842 }
843 
extent_range_clear_dirty_for_io(struct btrfs_inode * inode,u64 start,u64 end)844 static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end)
845 {
846 	const pgoff_t end_index = end >> PAGE_SHIFT;
847 	struct folio *folio;
848 	int ret = 0;
849 
850 	for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) {
851 		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
852 		if (IS_ERR(folio)) {
853 			if (!ret)
854 				ret = PTR_ERR(folio);
855 			continue;
856 		}
857 		btrfs_folio_clamp_clear_dirty(inode->root->fs_info, folio, start,
858 					      end + 1 - start);
859 		folio_put(folio);
860 	}
861 	return ret;
862 }
863 
compressed_bio_last_folio(struct compressed_bio * cb)864 static struct folio *compressed_bio_last_folio(struct compressed_bio *cb)
865 {
866 	struct bio *bio = &cb->bbio.bio;
867 	struct bio_vec *bvec;
868 	phys_addr_t paddr;
869 
870 	/*
871 	 * Make sure all folios have the same min_folio_size.
872 	 *
873 	 * Otherwise we cannot simply use offset_in_offset(folio, bi_size) to
874 	 * calculate the end of the last folio.
875 	 */
876 	if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
877 		struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
878 		const u32 min_folio_size = btrfs_min_folio_size(fs_info);
879 		struct folio_iter fi;
880 
881 		bio_for_each_folio_all(fi, bio)
882 			ASSERT(folio_size(fi.folio) == min_folio_size);
883 	}
884 
885 	/* The bio must not be empty. */
886 	ASSERT(bio->bi_vcnt);
887 
888 	bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
889 	paddr = page_to_phys(bvec->bv_page) + bvec->bv_offset + bvec->bv_len - 1;
890 	return page_folio(phys_to_page(paddr));
891 }
892 
zero_last_folio(struct compressed_bio * cb)893 static void zero_last_folio(struct compressed_bio *cb)
894 {
895 	struct bio *bio = &cb->bbio.bio;
896 	struct folio *last_folio = compressed_bio_last_folio(cb);
897 	const u32 bio_size = bio->bi_iter.bi_size;
898 	const u32 foffset = offset_in_folio(last_folio, bio_size);
899 
900 	folio_zero_range(last_folio, foffset, folio_size(last_folio) - foffset);
901 }
902 
round_up_last_block(struct compressed_bio * cb,u32 blocksize)903 static void round_up_last_block(struct compressed_bio *cb, u32 blocksize)
904 {
905 	struct bio *bio = &cb->bbio.bio;
906 	struct folio *last_folio = compressed_bio_last_folio(cb);
907 	const u32 bio_size = bio->bi_iter.bi_size;
908 	const u32 foffset = offset_in_folio(last_folio, bio_size);
909 	bool ret;
910 
911 	if (IS_ALIGNED(bio_size, blocksize))
912 		return;
913 
914 	ret = bio_add_folio(bio, last_folio, round_up(foffset, blocksize) - foffset, foffset);
915 	/* The remaining part should be merged thus never fail. */
916 	ASSERT(ret);
917 }
918 
919 /*
920  * Work queue call back to started compression on a file and pages.
921  *
922  * This is done inside an ordered work queue, and the compression is spread
923  * across many cpus.  The actual IO submission is step two, and the ordered work
924  * queue takes care of making sure that happens in the same order things were
925  * put onto the queue by writepages and friends.
926  *
927  * If this code finds it can't get good compression, it puts an entry onto the
928  * work queue to write the uncompressed bytes.  This makes sure that both
929  * compressed inodes and uncompressed inodes are written in the same order that
930  * the flusher thread sent them down.
931  */
compress_file_range(struct btrfs_work * work)932 static void compress_file_range(struct btrfs_work *work)
933 {
934 	struct async_chunk *async_chunk =
935 		container_of(work, struct async_chunk, work);
936 	struct btrfs_inode *inode = async_chunk->inode;
937 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
938 	struct address_space *mapping = inode->vfs_inode.i_mapping;
939 	struct compressed_bio *cb = NULL;
940 	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
941 	u64 blocksize = fs_info->sectorsize;
942 	u64 start = async_chunk->start;
943 	u64 end = async_chunk->end;
944 	u64 actual_end;
945 	u64 i_size;
946 	u32 cur_len;
947 	int ret = 0;
948 	unsigned long total_compressed = 0;
949 	unsigned long total_in = 0;
950 	unsigned int loff;
951 	int compress_type = fs_info->compress_type;
952 	int compress_level = fs_info->compress_level;
953 
954 	if (btrfs_is_shutdown(fs_info))
955 		goto cleanup_and_bail_uncompressed;
956 
957 	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
958 
959 	/*
960 	 * We need to call clear_page_dirty_for_io on each page in the range.
961 	 * Otherwise applications with the file mmap'd can wander in and change
962 	 * the page contents while we are compressing them.
963 	 */
964 	ret = extent_range_clear_dirty_for_io(inode, start, end);
965 
966 	/*
967 	 * All the folios should have been locked thus no failure.
968 	 *
969 	 * And even if some folios are missing, btrfs_compress_bio()
970 	 * would handle them correctly, so here just do an ASSERT() check for
971 	 * early logic errors.
972 	 */
973 	ASSERT(ret == 0);
974 
975 	/*
976 	 * We need to save i_size before now because it could change in between
977 	 * us evaluating the size and assigning it.  This is because we lock and
978 	 * unlock the page in truncate and fallocate, and then modify the i_size
979 	 * later on.
980 	 *
981 	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
982 	 * does that for us.
983 	 */
984 	barrier();
985 	i_size = i_size_read(&inode->vfs_inode);
986 	barrier();
987 	actual_end = min_t(u64, i_size, end + 1);
988 again:
989 	total_in = 0;
990 	cur_len = min(end + 1 - start, BTRFS_MAX_UNCOMPRESSED);
991 	ret = 0;
992 	cb = NULL;
993 
994 	/*
995 	 * we don't want to send crud past the end of i_size through
996 	 * compression, that's just a waste of CPU time.  So, if the
997 	 * end of the file is before the start of our current
998 	 * requested range of bytes, we bail out to the uncompressed
999 	 * cleanup code that can deal with all of this.
1000 	 *
1001 	 * It isn't really the fastest way to fix things, but this is a
1002 	 * very uncommon corner.
1003 	 */
1004 	if (actual_end <= start)
1005 		goto cleanup_and_bail_uncompressed;
1006 
1007 	/*
1008 	 * We do compression for mount -o compress and when the inode has not
1009 	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
1010 	 * discover bad compression ratios.
1011 	 */
1012 	if (!inode_need_compress(inode, start, end))
1013 		goto cleanup_and_bail_uncompressed;
1014 
1015 	if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
1016 		compress_type = inode->defrag_compress;
1017 		compress_level = inode->defrag_compress_level;
1018 	} else if (inode->prop_compress) {
1019 		compress_type = inode->prop_compress;
1020 	}
1021 
1022 	/* Compression level is applied here. */
1023 	cb = btrfs_compress_bio(inode, start, cur_len, compress_type,
1024 				 compress_level, async_chunk->write_flags);
1025 	if (IS_ERR(cb)) {
1026 		cb = NULL;
1027 		goto mark_incompressible;
1028 	}
1029 
1030 	total_compressed = cb->bbio.bio.bi_iter.bi_size;
1031 	total_in = cur_len;
1032 
1033 	/*
1034 	 * Zero the tail end of the last folio, as we might be sending it down
1035 	 * to disk.
1036 	 */
1037 	loff = (total_compressed & (min_folio_size - 1));
1038 	if (loff)
1039 		zero_last_folio(cb);
1040 
1041 	/*
1042 	 * Try to create an inline extent.
1043 	 *
1044 	 * If we didn't compress the entire range, try to create an uncompressed
1045 	 * inline extent, else a compressed one.
1046 	 *
1047 	 * Check cow_file_range() for why we don't even try to create inline
1048 	 * extent for the subpage case.
1049 	 */
1050 	if (total_in < actual_end)
1051 		ret = cow_file_range_inline(inode, NULL, start, end, 0,
1052 					    BTRFS_COMPRESS_NONE, NULL, false);
1053 	else
1054 		ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
1055 					    compress_type,
1056 					    bio_first_folio_all(&cb->bbio.bio), false);
1057 	if (ret <= 0) {
1058 		cleanup_compressed_bio(cb);
1059 		if (ret < 0)
1060 			mapping_set_error(mapping, -EIO);
1061 		return;
1062 	}
1063 
1064 	/*
1065 	 * We aren't doing an inline extent. Round the compressed size up to a
1066 	 * block size boundary so the allocator does sane things.
1067 	 */
1068 	total_compressed = ALIGN(total_compressed, blocksize);
1069 	round_up_last_block(cb, blocksize);
1070 
1071 	/*
1072 	 * One last check to make sure the compression is really a win, compare
1073 	 * the page count read with the blocks on disk, compression must free at
1074 	 * least one sector.
1075 	 */
1076 	total_in = round_up(total_in, fs_info->sectorsize);
1077 	if (total_compressed + blocksize > total_in)
1078 		goto mark_incompressible;
1079 
1080 
1081 	/*
1082 	 * The async work queues will take care of doing actual allocation on
1083 	 * disk for these compressed pages, and will submit the bios.
1084 	 */
1085 	ret = add_async_extent(async_chunk, start, total_in, cb);
1086 	BUG_ON(ret);
1087 	if (start + total_in < end) {
1088 		start += total_in;
1089 		cond_resched();
1090 		goto again;
1091 	}
1092 	return;
1093 
1094 mark_incompressible:
1095 	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
1096 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
1097 cleanup_and_bail_uncompressed:
1098 	ret = add_async_extent(async_chunk, start, end - start + 1, NULL);
1099 	BUG_ON(ret);
1100 	if (cb)
1101 		cleanup_compressed_bio(cb);
1102 }
1103 
submit_uncompressed_range(struct btrfs_inode * inode,struct async_extent * async_extent,struct folio * locked_folio)1104 static void submit_uncompressed_range(struct btrfs_inode *inode,
1105 				      struct async_extent *async_extent,
1106 				      struct folio *locked_folio)
1107 {
1108 	u64 start = async_extent->start;
1109 	u64 end = async_extent->start + async_extent->ram_size - 1;
1110 	int ret;
1111 	struct writeback_control wbc = {
1112 		.sync_mode		= WB_SYNC_ALL,
1113 		.range_start		= start,
1114 		.range_end		= end,
1115 		.no_cgroup_owner	= 1,
1116 	};
1117 
1118 	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
1119 	ret = run_delalloc_cow(inode, locked_folio, start, end,
1120 			       &wbc, false);
1121 	wbc_detach_inode(&wbc);
1122 	if (ret < 0) {
1123 		if (locked_folio)
1124 			btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
1125 					     start, async_extent->ram_size);
1126 		btrfs_err_rl(inode->root->fs_info,
1127 			"%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
1128 			     __func__, btrfs_root_id(inode->root),
1129 			     btrfs_ino(inode), start, async_extent->ram_size, ret);
1130 	}
1131 }
1132 
submit_one_async_extent(struct async_chunk * async_chunk,struct async_extent * async_extent,u64 * alloc_hint)1133 static void submit_one_async_extent(struct async_chunk *async_chunk,
1134 				    struct async_extent *async_extent,
1135 				    u64 *alloc_hint)
1136 {
1137 	struct btrfs_inode *inode = async_chunk->inode;
1138 	struct extent_io_tree *io_tree = &inode->io_tree;
1139 	struct btrfs_root *root = inode->root;
1140 	struct btrfs_fs_info *fs_info = root->fs_info;
1141 	struct btrfs_ordered_extent *ordered;
1142 	struct btrfs_file_extent file_extent;
1143 	struct btrfs_key ins;
1144 	struct folio *locked_folio = NULL;
1145 	struct extent_state *cached = NULL;
1146 	struct extent_map *em;
1147 	int ret = 0;
1148 	u32 compressed_size;
1149 	u64 start = async_extent->start;
1150 	u64 end = async_extent->start + async_extent->ram_size - 1;
1151 
1152 	if (async_chunk->blkcg_css)
1153 		kthread_associate_blkcg(async_chunk->blkcg_css);
1154 
1155 	/*
1156 	 * If async_chunk->locked_folio is in the async_extent range, we need to
1157 	 * handle it.
1158 	 */
1159 	if (async_chunk->locked_folio) {
1160 		u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
1161 		u64 locked_folio_end = locked_folio_start +
1162 			folio_size(async_chunk->locked_folio) - 1;
1163 
1164 		if (!(start >= locked_folio_end || end <= locked_folio_start))
1165 			locked_folio = async_chunk->locked_folio;
1166 	}
1167 
1168 	if (!async_extent->cb) {
1169 		submit_uncompressed_range(inode, async_extent, locked_folio);
1170 		goto done;
1171 	}
1172 
1173 	compressed_size = async_extent->cb->bbio.bio.bi_iter.bi_size;
1174 	ret = btrfs_reserve_extent(root, async_extent->ram_size,
1175 				   compressed_size, compressed_size,
1176 				   0, *alloc_hint, &ins, true, true);
1177 	if (ret) {
1178 		/*
1179 		 * We can't reserve contiguous space for the compressed size.
1180 		 * Unlikely, but it's possible that we could have enough
1181 		 * non-contiguous space for the uncompressed size instead.  So
1182 		 * fall back to uncompressed.
1183 		 */
1184 		submit_uncompressed_range(inode, async_extent, locked_folio);
1185 		cleanup_compressed_bio(async_extent->cb);
1186 		async_extent->cb = NULL;
1187 		goto done;
1188 	}
1189 
1190 	btrfs_lock_extent(io_tree, start, end, &cached);
1191 
1192 	/* Here we're doing allocation and writeback of the compressed pages */
1193 	file_extent.disk_bytenr = ins.objectid;
1194 	file_extent.disk_num_bytes = ins.offset;
1195 	file_extent.ram_bytes = async_extent->ram_size;
1196 	file_extent.num_bytes = async_extent->ram_size;
1197 	file_extent.offset = 0;
1198 	file_extent.compression = async_extent->cb->compress_type;
1199 
1200 	async_extent->cb->bbio.bio.bi_iter.bi_sector = ins.objectid >> SECTOR_SHIFT;
1201 
1202 	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
1203 	if (IS_ERR(em)) {
1204 		ret = PTR_ERR(em);
1205 		goto out_free_reserve;
1206 	}
1207 	btrfs_free_extent_map(em);
1208 
1209 	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
1210 					     1U << BTRFS_ORDERED_COMPRESSED);
1211 	if (IS_ERR(ordered)) {
1212 		btrfs_drop_extent_map_range(inode, start, end, false);
1213 		ret = PTR_ERR(ordered);
1214 		goto out_free_reserve;
1215 	}
1216 	async_extent->cb->bbio.ordered = ordered;
1217 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1218 
1219 	/* Clear dirty, set writeback and unlock the pages. */
1220 	extent_clear_unlock_delalloc(inode, start, end,
1221 			NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
1222 			PAGE_UNLOCK | PAGE_START_WRITEBACK);
1223 	btrfs_submit_bbio(&async_extent->cb->bbio, 0);
1224 	async_extent->cb = NULL;
1225 
1226 	*alloc_hint = ins.objectid + ins.offset;
1227 done:
1228 	if (async_chunk->blkcg_css)
1229 		kthread_associate_blkcg(NULL);
1230 	kfree(async_extent);
1231 	return;
1232 
1233 out_free_reserve:
1234 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1235 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
1236 	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1237 	extent_clear_unlock_delalloc(inode, start, end,
1238 				     NULL, &cached,
1239 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1240 				     EXTENT_DELALLOC_NEW |
1241 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
1242 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1243 				     PAGE_END_WRITEBACK);
1244 	if (async_extent->cb)
1245 		cleanup_compressed_bio(async_extent->cb);
1246 	if (async_chunk->blkcg_css)
1247 		kthread_associate_blkcg(NULL);
1248 	btrfs_debug(fs_info,
1249 "async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
1250 		    btrfs_root_id(root), btrfs_ino(inode), start,
1251 		    async_extent->ram_size, ret);
1252 	kfree(async_extent);
1253 }
1254 
btrfs_get_extent_allocation_hint(struct btrfs_inode * inode,u64 start,u64 num_bytes)1255 u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
1256 				     u64 num_bytes)
1257 {
1258 	struct extent_map_tree *em_tree = &inode->extent_tree;
1259 	struct extent_map *em;
1260 	u64 alloc_hint = 0;
1261 
1262 	read_lock(&em_tree->lock);
1263 	em = btrfs_search_extent_mapping(em_tree, start, num_bytes);
1264 	if (em) {
1265 		/*
1266 		 * if block start isn't an actual block number then find the
1267 		 * first block in this inode and use that as a hint.  If that
1268 		 * block is also bogus then just don't worry about it.
1269 		 */
1270 		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
1271 			btrfs_free_extent_map(em);
1272 			em = btrfs_search_extent_mapping(em_tree, 0, 0);
1273 			if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
1274 				alloc_hint = btrfs_extent_map_block_start(em);
1275 			if (em)
1276 				btrfs_free_extent_map(em);
1277 		} else {
1278 			alloc_hint = btrfs_extent_map_block_start(em);
1279 			btrfs_free_extent_map(em);
1280 		}
1281 	}
1282 	read_unlock(&em_tree->lock);
1283 
1284 	return alloc_hint;
1285 }
1286 
1287 /*
1288  * Handle COW for one range.
1289  *
1290  * @ins:		The key representing the allocated range.
1291  * @file_offset:	The file offset of the COW range
1292  * @num_bytes:		The expected length of the COW range
1293  *			The actually allocated length can be smaller than it.
1294  * @min_alloc_size:	The minimal extent size.
1295  * @alloc_hint:		The hint for the extent allocator.
1296  * @ret_alloc_size:	The COW range handles by this function.
1297  *
1298  * Return 0 if everything is fine and update @ret_alloc_size updated.  The
1299  * range is still locked, and caller should unlock the range after everything
1300  * is done or for error handling.
1301  *
1302  * Return <0 for error and @is updated for where the extra cleanup should
1303  * happen. The range [file_offset, file_offset + ret_alloc_size) will be
1304  * cleaned up by this function.
1305  */
cow_one_range(struct btrfs_inode * inode,struct folio * locked_folio,struct btrfs_key * ins,struct extent_state ** cached,u64 file_offset,u32 num_bytes,u32 min_alloc_size,u64 alloc_hint,u32 * ret_alloc_size)1306 static int cow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
1307 			 struct btrfs_key *ins, struct extent_state **cached,
1308 			 u64 file_offset, u32 num_bytes, u32 min_alloc_size,
1309 			 u64 alloc_hint, u32 *ret_alloc_size)
1310 {
1311 	struct btrfs_root *root = inode->root;
1312 	struct btrfs_fs_info *fs_info = root->fs_info;
1313 	struct btrfs_ordered_extent *ordered;
1314 	struct btrfs_file_extent file_extent;
1315 	struct extent_map *em;
1316 	u32 cur_len = 0;
1317 	u64 cur_end;
1318 	int ret;
1319 
1320 	ret = btrfs_reserve_extent(root, num_bytes, num_bytes, min_alloc_size,
1321 				   0, alloc_hint, ins, true, true);
1322 	if (ret < 0) {
1323 		*ret_alloc_size = cur_len;
1324 		return ret;
1325 	}
1326 
1327 	cur_len = ins->offset;
1328 	cur_end = file_offset + cur_len - 1;
1329 
1330 	file_extent.disk_bytenr = ins->objectid;
1331 	file_extent.disk_num_bytes = ins->offset;
1332 	file_extent.num_bytes = ins->offset;
1333 	file_extent.ram_bytes = ins->offset;
1334 	file_extent.offset = 0;
1335 	file_extent.compression = BTRFS_COMPRESS_NONE;
1336 
1337 	/*
1338 	 * Locked range will be released either during error clean up (inside
1339 	 * this function or by the caller for previously successful ranges) or
1340 	 * after the whole range is finished.
1341 	 */
1342 	btrfs_lock_extent(&inode->io_tree, file_offset, cur_end, cached);
1343 	em = btrfs_create_io_em(inode, file_offset, &file_extent, BTRFS_ORDERED_REGULAR);
1344 	if (IS_ERR(em)) {
1345 		ret = PTR_ERR(em);
1346 		goto free_reserved;
1347 	}
1348 	btrfs_free_extent_map(em);
1349 
1350 	ordered = btrfs_alloc_ordered_extent(inode, file_offset, &file_extent,
1351 					     1U << BTRFS_ORDERED_REGULAR);
1352 	if (IS_ERR(ordered)) {
1353 		btrfs_drop_extent_map_range(inode, file_offset, cur_end, false);
1354 		ret = PTR_ERR(ordered);
1355 		goto free_reserved;
1356 	}
1357 
1358 	if (btrfs_is_data_reloc_root(root)) {
1359 		ret = btrfs_reloc_clone_csums(ordered);
1360 
1361 		/*
1362 		 * Only drop cache here, and process as normal.
1363 		 *
1364 		 * We must not allow extent_clear_unlock_delalloc() at
1365 		 * free_reserved label to free meta of this ordered extent, as
1366 		 * its meta should be freed by btrfs_finish_ordered_io().
1367 		 *
1368 		 * So we must continue until @start is increased to
1369 		 * skip current ordered extent.
1370 		 */
1371 		if (ret)
1372 			btrfs_drop_extent_map_range(inode, file_offset,
1373 						    cur_end, false);
1374 	}
1375 	btrfs_put_ordered_extent(ordered);
1376 	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
1377 	/*
1378 	 * Error handling for btrfs_reloc_clone_csums().
1379 	 *
1380 	 * Treat the range as finished, thus only clear EXTENT_LOCKED | EXTENT_DELALLOC.
1381 	 * The accounting will be done by ordered extents.
1382 	 */
1383 	if (unlikely(ret < 0)) {
1384 		btrfs_cleanup_ordered_extents(inode, file_offset, cur_len);
1385 		extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
1386 					     EXTENT_LOCKED | EXTENT_DELALLOC,
1387 					     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1388 					     PAGE_END_WRITEBACK);
1389 		mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1390 	}
1391 	*ret_alloc_size = cur_len;
1392 	return ret;
1393 
1394 free_reserved:
1395 	/*
1396 	 * If we have reserved an extent for the current range and failed to
1397 	 * create the respective extent map or ordered extent, it means that
1398 	 * when we reserved the extent we decremented the extent's size from
1399 	 * the data space_info's bytes_may_use counter and
1400 	 * incremented the space_info's bytes_reserved counter by the same
1401 	 * amount.
1402 	 *
1403 	 * We must make sure extent_clear_unlock_delalloc() does not try
1404 	 * to decrement again the data space_info's bytes_may_use counter, which
1405 	 * will be handled by btrfs_free_reserved_extent().
1406 	 *
1407 	 * Therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV, but only
1408 	 * EXTENT_CLEAR_META_RESV.
1409 	 */
1410 	extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
1411 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1412 				     EXTENT_DELALLOC_NEW |
1413 				     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV,
1414 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1415 				     PAGE_END_WRITEBACK);
1416 	btrfs_qgroup_free_data(inode, NULL, file_offset, cur_len, NULL);
1417 	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
1418 	btrfs_free_reserved_extent(fs_info, ins->objectid, ins->offset, true);
1419 	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1420 	*ret_alloc_size = cur_len;
1421 	/*
1422 	 * We should not return -EAGAIN where it's a special return code for
1423 	 * zoned to catch btrfs_reserved_extent().
1424 	 */
1425 	ASSERT(ret != -EAGAIN);
1426 	return ret;
1427 }
1428 
1429 /*
1430  * when extent_io.c finds a delayed allocation range in the file,
1431  * the call backs end up in this code.  The basic idea is to
1432  * allocate extents on disk for the range, and create ordered data structs
1433  * in ram to track those extents.
1434  *
1435  * locked_folio is the folio that writepage had locked already.  We use
1436  * it to make sure we don't do extra locks or unlocks.
1437  *
1438  * When this function fails, it unlocks all folios except @locked_folio.
1439  *
1440  * When this function successfully creates an inline extent, it returns 1 and
1441  * unlocks all folios including locked_folio and starts I/O on them.
1442  * (In reality inline extents are limited to a single block, so locked_folio is
1443  * the only folio handled anyway).
1444  *
1445  * When this function succeed and creates a normal extent, the folio locking
1446  * status depends on the passed in flags:
1447  *
1448  * - If COW_FILE_RANGE_KEEP_LOCKED flag is set, all folios are kept locked.
1449  * - Else all folios except for @locked_folio are unlocked.
1450  *
1451  * When a failure happens in the second or later iteration of the
1452  * while-loop, the ordered extents created in previous iterations are cleaned up.
1453  */
cow_file_range(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,u64 * done_offset,unsigned long flags)1454 static noinline int cow_file_range(struct btrfs_inode *inode,
1455 				   struct folio *locked_folio, u64 start,
1456 				   u64 end, u64 *done_offset,
1457 				   unsigned long flags)
1458 {
1459 	struct btrfs_root *root = inode->root;
1460 	struct btrfs_fs_info *fs_info = root->fs_info;
1461 	struct extent_state *cached = NULL;
1462 	u64 alloc_hint = 0;
1463 	u64 orig_start = start;
1464 	u64 num_bytes;
1465 	u32 min_alloc_size;
1466 	u32 blocksize = fs_info->sectorsize;
1467 	u32 cur_alloc_size = 0;
1468 	struct btrfs_key ins;
1469 	unsigned clear_bits;
1470 	unsigned long page_ops;
1471 	int ret = 0;
1472 
1473 	if (btrfs_is_shutdown(fs_info)) {
1474 		ret = -EIO;
1475 		goto out_unlock;
1476 	}
1477 
1478 	if (btrfs_is_free_space_inode(inode)) {
1479 		ret = -EINVAL;
1480 		goto out_unlock;
1481 	}
1482 
1483 	num_bytes = ALIGN(end - start + 1, blocksize);
1484 	num_bytes = max(blocksize,  num_bytes);
1485 	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1486 
1487 	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1488 
1489 	if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
1490 		/* lets try to make an inline extent */
1491 		ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
1492 					    BTRFS_COMPRESS_NONE, NULL, false);
1493 		if (ret <= 0) {
1494 			/*
1495 			 * We succeeded, return 1 so the caller knows we're done
1496 			 * with this page and already handled the IO.
1497 			 *
1498 			 * If there was an error then cow_file_range_inline() has
1499 			 * already done the cleanup.
1500 			 */
1501 			if (ret == 0)
1502 				ret = 1;
1503 			goto done;
1504 		}
1505 	}
1506 
1507 	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
1508 
1509 	/*
1510 	 * We're not doing compressed IO, don't unlock the first page (which
1511 	 * the caller expects to stay locked), don't clear any dirty bits and
1512 	 * don't set any writeback bits.
1513 	 *
1514 	 * Do set the Ordered (Private2) bit so we know this page was properly
1515 	 * setup for writepage.
1516 	 */
1517 	page_ops = ((flags & COW_FILE_RANGE_KEEP_LOCKED) ? 0 : PAGE_UNLOCK);
1518 	page_ops |= PAGE_SET_ORDERED;
1519 
1520 	/*
1521 	 * Relocation relies on the relocated extents to have exactly the same
1522 	 * size as the original extents. Normally writeback for relocation data
1523 	 * extents follows a NOCOW path because relocation preallocates the
1524 	 * extents. However, due to an operation such as scrub turning a block
1525 	 * group to RO mode, it may fallback to COW mode, so we must make sure
1526 	 * an extent allocated during COW has exactly the requested size and can
1527 	 * not be split into smaller extents, otherwise relocation breaks and
1528 	 * fails during the stage where it updates the bytenr of file extent
1529 	 * items.
1530 	 */
1531 	if (btrfs_is_data_reloc_root(root))
1532 		min_alloc_size = num_bytes;
1533 	else
1534 		min_alloc_size = fs_info->sectorsize;
1535 
1536 	while (num_bytes > 0) {
1537 		ret = cow_one_range(inode, locked_folio, &ins, &cached, start,
1538 				    num_bytes, min_alloc_size, alloc_hint, &cur_alloc_size);
1539 
1540 		if (ret == -EAGAIN) {
1541 			/*
1542 			 * cow_one_range() only returns -EAGAIN for zoned
1543 			 * file systems (from btrfs_reserve_extent()), which
1544 			 * is an indication that there are
1545 			 * no active zones to allocate from at the moment.
1546 			 *
1547 			 * If this is the first loop iteration, wait for at
1548 			 * least one zone to finish before retrying the
1549 			 * allocation.  Otherwise ask the caller to write out
1550 			 * the already allocated blocks before coming back to
1551 			 * us, or return -ENOSPC if it can't handle retries.
1552 			 */
1553 			ASSERT(btrfs_is_zoned(fs_info));
1554 			if (start == orig_start) {
1555 				wait_on_bit_io(&inode->root->fs_info->flags,
1556 					       BTRFS_FS_NEED_ZONE_FINISH,
1557 					       TASK_UNINTERRUPTIBLE);
1558 				continue;
1559 			}
1560 			if (done_offset) {
1561 				/*
1562 				 * Move @end to the end of the processed range,
1563 				 * and exit the loop to unlock the processed extents.
1564 				 */
1565 				end = start - 1;
1566 				ret = 0;
1567 				break;
1568 			}
1569 			ret = -ENOSPC;
1570 		}
1571 		if (ret < 0)
1572 			goto out_unlock;
1573 
1574 		/* We should not allocate an extent larger than requested.*/
1575 		ASSERT(cur_alloc_size <= num_bytes);
1576 
1577 		num_bytes -= cur_alloc_size;
1578 		alloc_hint = ins.objectid + ins.offset;
1579 		start += cur_alloc_size;
1580 		cur_alloc_size = 0;
1581 	}
1582 	extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
1583 				     EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
1584 done:
1585 	if (done_offset)
1586 		*done_offset = end;
1587 	return ret;
1588 
1589 out_unlock:
1590 	/*
1591 	 * Now, we have three regions to clean up:
1592 	 *
1593 	 * |-------(1)----|---(2)---|-------------(3)----------|
1594 	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
1595 	 *
1596 	 * We process each region below.
1597 	 */
1598 
1599 	/*
1600 	 * For the range (1). We have already instantiated the ordered extents
1601 	 * for this region, thus we need to cleanup those ordered extents.
1602 	 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
1603 	 * are also handled by the ordered extents cleanup.
1604 	 *
1605 	 * So here we only clear EXTENT_LOCKED and EXTENT_DELALLOC flag, and
1606 	 * finish the writeback of the involved folios, which will be never submitted.
1607 	 */
1608 	if (orig_start < start) {
1609 		clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
1610 		page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1611 
1612 		if (!locked_folio)
1613 			mapping_set_error(inode->vfs_inode.i_mapping, ret);
1614 
1615 		btrfs_cleanup_ordered_extents(inode, orig_start, start - orig_start);
1616 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
1617 					     locked_folio, NULL, clear_bits, page_ops);
1618 	}
1619 
1620 	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1621 		     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1622 	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1623 
1624 	/*
1625 	 * For the range (2) the error handling is done by cow_one_range() itself.
1626 	 * Nothing needs to be done.
1627 	 *
1628 	 * For the range (3). We never touched the region. In addition to the
1629 	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
1630 	 * space_info's bytes_may_use counter, reserved in
1631 	 * btrfs_check_data_free_space().
1632 	 */
1633 	if (start + cur_alloc_size < end) {
1634 		clear_bits |= EXTENT_CLEAR_DATA_RESV;
1635 		extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
1636 					     end, locked_folio,
1637 					     &cached, clear_bits, page_ops);
1638 		btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
1639 				       end - start - cur_alloc_size + 1, NULL);
1640 	}
1641 	btrfs_err(fs_info,
1642 "%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu cur_alloc_size=%u: %d",
1643 		  __func__, btrfs_root_id(inode->root),
1644 		  btrfs_ino(inode), orig_start, end + 1 - orig_start,
1645 		  start, cur_alloc_size, ret);
1646 	return ret;
1647 }
1648 
1649 /*
1650  * Phase two of compressed writeback.  This is the ordered portion of the code,
1651  * which only gets called in the order the work was queued.  We walk all the
1652  * async extents created by compress_file_range and send them down to the disk.
1653  *
1654  * If called with @do_free == true then it'll try to finish the work and free
1655  * the work struct eventually.
1656  */
submit_compressed_extents(struct btrfs_work * work,bool do_free)1657 static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
1658 {
1659 	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1660 						     work);
1661 	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1662 	struct async_extent *async_extent;
1663 	unsigned long nr_pages;
1664 	u64 alloc_hint = 0;
1665 
1666 	if (do_free) {
1667 		struct async_cow *async_cow;
1668 
1669 		btrfs_add_delayed_iput(async_chunk->inode);
1670 		if (async_chunk->blkcg_css)
1671 			css_put(async_chunk->blkcg_css);
1672 
1673 		async_cow = async_chunk->async_cow;
1674 		if (atomic_dec_and_test(&async_cow->num_chunks))
1675 			kvfree(async_cow);
1676 		return;
1677 	}
1678 
1679 	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1680 		PAGE_SHIFT;
1681 
1682 	while (!list_empty(&async_chunk->extents)) {
1683 		async_extent = list_first_entry(&async_chunk->extents,
1684 						struct async_extent, list);
1685 		list_del(&async_extent->list);
1686 		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
1687 	}
1688 
1689 	/* atomic_sub_return implies a barrier */
1690 	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1691 	    5 * SZ_1M)
1692 		cond_wake_up_nomb(&fs_info->async_submit_wait);
1693 }
1694 
run_delalloc_compressed(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,struct writeback_control * wbc)1695 static bool run_delalloc_compressed(struct btrfs_inode *inode,
1696 				    struct folio *locked_folio, u64 start,
1697 				    u64 end, struct writeback_control *wbc)
1698 {
1699 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1700 	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1701 	struct async_cow *ctx;
1702 	struct async_chunk *async_chunk;
1703 	unsigned long nr_pages;
1704 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1705 	int i;
1706 	unsigned nofs_flag;
1707 	const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1708 
1709 	nofs_flag = memalloc_nofs_save();
1710 	ctx = kvmalloc_flex(*ctx, chunks, num_chunks);
1711 	memalloc_nofs_restore(nofs_flag);
1712 	if (!ctx)
1713 		return false;
1714 
1715 	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
1716 
1717 	async_chunk = ctx->chunks;
1718 	atomic_set(&ctx->num_chunks, num_chunks);
1719 
1720 	for (i = 0; i < num_chunks; i++) {
1721 		u64 cur_end = min(end, start + SZ_512K - 1);
1722 
1723 		/*
1724 		 * igrab is called higher up in the call chain, take only the
1725 		 * lightweight reference for the callback lifetime
1726 		 */
1727 		ihold(&inode->vfs_inode);
1728 		async_chunk[i].async_cow = ctx;
1729 		async_chunk[i].inode = inode;
1730 		async_chunk[i].start = start;
1731 		async_chunk[i].end = cur_end;
1732 		async_chunk[i].write_flags = write_flags;
1733 		INIT_LIST_HEAD(&async_chunk[i].extents);
1734 
1735 		/*
1736 		 * The locked_folio comes all the way from writepage and its
1737 		 * the original folio we were actually given.  As we spread
1738 		 * this large delalloc region across multiple async_chunk
1739 		 * structs, only the first struct needs a pointer to
1740 		 * locked_folio.
1741 		 *
1742 		 * This way we don't need racey decisions about who is supposed
1743 		 * to unlock it.
1744 		 */
1745 		if (locked_folio) {
1746 			/*
1747 			 * Depending on the compressibility, the pages might or
1748 			 * might not go through async.  We want all of them to
1749 			 * be accounted against wbc once.  Let's do it here
1750 			 * before the paths diverge.  wbc accounting is used
1751 			 * only for foreign writeback detection and doesn't
1752 			 * need full accuracy.  Just account the whole thing
1753 			 * against the first page.
1754 			 */
1755 			wbc_account_cgroup_owner(wbc, locked_folio,
1756 						 cur_end - start);
1757 			async_chunk[i].locked_folio = locked_folio;
1758 			locked_folio = NULL;
1759 		} else {
1760 			async_chunk[i].locked_folio = NULL;
1761 		}
1762 
1763 		if (blkcg_css != blkcg_root_css) {
1764 			css_get(blkcg_css);
1765 			async_chunk[i].blkcg_css = blkcg_css;
1766 			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
1767 		} else {
1768 			async_chunk[i].blkcg_css = NULL;
1769 		}
1770 
1771 		btrfs_init_work(&async_chunk[i].work, compress_file_range,
1772 				submit_compressed_extents);
1773 
1774 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1775 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1776 
1777 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1778 
1779 		start = cur_end + 1;
1780 	}
1781 	return true;
1782 }
1783 
1784 /*
1785  * Run the delalloc range from start to end, and write back any dirty pages
1786  * covered by the range.
1787  */
run_delalloc_cow(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,struct writeback_control * wbc,bool pages_dirty)1788 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
1789 				     struct folio *locked_folio, u64 start,
1790 				     u64 end, struct writeback_control *wbc,
1791 				     bool pages_dirty)
1792 {
1793 	u64 done_offset = end;
1794 	int ret;
1795 
1796 	while (start <= end) {
1797 		ret = cow_file_range(inode, locked_folio, start, end,
1798 				     &done_offset, COW_FILE_RANGE_KEEP_LOCKED);
1799 		if (ret)
1800 			return ret;
1801 		extent_write_locked_range(&inode->vfs_inode, locked_folio,
1802 					  start, done_offset, wbc, pages_dirty);
1803 		start = done_offset + 1;
1804 	}
1805 
1806 	return 1;
1807 }
1808 
fallback_to_cow(struct btrfs_inode * inode,struct folio * locked_folio,const u64 start,const u64 end)1809 static int fallback_to_cow(struct btrfs_inode *inode,
1810 			   struct folio *locked_folio, const u64 start,
1811 			   const u64 end)
1812 {
1813 	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1814 	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1815 	const u64 range_bytes = end + 1 - start;
1816 	struct extent_io_tree *io_tree = &inode->io_tree;
1817 	struct extent_state *cached_state = NULL;
1818 	u64 range_start = start;
1819 	u64 count;
1820 	int ret;
1821 
1822 	/*
1823 	 * If EXTENT_NORESERVE is set it means that when the buffered write was
1824 	 * made we had not enough available data space and therefore we did not
1825 	 * reserve data space for it, since we though we could do NOCOW for the
1826 	 * respective file range (either there is prealloc extent or the inode
1827 	 * has the NOCOW bit set).
1828 	 *
1829 	 * However when we need to fallback to COW mode (because for example the
1830 	 * block group for the corresponding extent was turned to RO mode by a
1831 	 * scrub or relocation) we need to do the following:
1832 	 *
1833 	 * 1) We increment the bytes_may_use counter of the data space info.
1834 	 *    If COW succeeds, it allocates a new data extent and after doing
1835 	 *    that it decrements the space info's bytes_may_use counter and
1836 	 *    increments its bytes_reserved counter by the same amount (we do
1837 	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1838 	 *    bytes_may_use counter to compensate (when space is reserved at
1839 	 *    buffered write time, the bytes_may_use counter is incremented);
1840 	 *
1841 	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1842 	 *    that if the COW path fails for any reason, it decrements (through
1843 	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1844 	 *    data space info, which we incremented in the step above.
1845 	 *
1846 	 * If we need to fallback to cow and the inode corresponds to a free
1847 	 * space cache inode or an inode of the data relocation tree, we must
1848 	 * also increment bytes_may_use of the data space_info for the same
1849 	 * reason. Space caches and relocated data extents always get a prealloc
1850 	 * extent for them, however scrub or balance may have set the block
1851 	 * group that contains that extent to RO mode and therefore force COW
1852 	 * when starting writeback.
1853 	 */
1854 	btrfs_lock_extent(io_tree, start, end, &cached_state);
1855 	count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes,
1856 				       EXTENT_NORESERVE, 0, NULL);
1857 	if (count > 0 || is_space_ino || is_reloc_ino) {
1858 		u64 bytes = count;
1859 		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1860 		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1861 
1862 		if (is_space_ino || is_reloc_ino)
1863 			bytes = range_bytes;
1864 
1865 		spin_lock(&sinfo->lock);
1866 		btrfs_space_info_update_bytes_may_use(sinfo, bytes);
1867 		spin_unlock(&sinfo->lock);
1868 
1869 		if (count > 0)
1870 			btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1871 					       &cached_state);
1872 	}
1873 	btrfs_unlock_extent(io_tree, start, end, &cached_state);
1874 
1875 	/*
1876 	 * Don't try to create inline extents, as a mix of inline extent that
1877 	 * is written out and unlocked directly and a normal NOCOW extent
1878 	 * doesn't work.
1879 	 *
1880 	 * And here we do not unlock the folio after a successful run.
1881 	 * The folios will be unlocked after everything is finished, or by error handling.
1882 	 *
1883 	 * This is to ensure error handling won't need to clear dirty/ordered flags without
1884 	 * a locked folio, which can race with writeback.
1885 	 */
1886 	ret = cow_file_range(inode, locked_folio, start, end, NULL,
1887 			     COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED);
1888 	ASSERT(ret != 1);
1889 	return ret;
1890 }
1891 
1892 struct can_nocow_file_extent_args {
1893 	/* Input fields. */
1894 
1895 	/* Start file offset of the range we want to NOCOW. */
1896 	u64 start;
1897 	/* End file offset (inclusive) of the range we want to NOCOW. */
1898 	u64 end;
1899 	bool writeback_path;
1900 	/*
1901 	 * Free the path passed to can_nocow_file_extent() once it's not needed
1902 	 * anymore.
1903 	 */
1904 	bool free_path;
1905 
1906 	/*
1907 	 * Output fields. Only set when can_nocow_file_extent() returns 1.
1908 	 * The expected file extent for the NOCOW write.
1909 	 */
1910 	struct btrfs_file_extent file_extent;
1911 };
1912 
1913 /*
1914  * Check if we can NOCOW the file extent that the path points to.
1915  * This function may return with the path released, so the caller should check
1916  * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1917  *
1918  * Returns: < 0 on error
1919  *            0 if we can not NOCOW
1920  *            1 if we can NOCOW
1921  */
can_nocow_file_extent(struct btrfs_path * path,struct btrfs_key * key,struct btrfs_inode * inode,struct can_nocow_file_extent_args * args)1922 static int can_nocow_file_extent(struct btrfs_path *path,
1923 				 struct btrfs_key *key,
1924 				 struct btrfs_inode *inode,
1925 				 struct can_nocow_file_extent_args *args)
1926 {
1927 	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1928 	struct extent_buffer *leaf = path->nodes[0];
1929 	struct btrfs_root *root = inode->root;
1930 	struct btrfs_file_extent_item *fi;
1931 	struct btrfs_root *csum_root;
1932 	u64 io_start;
1933 	u64 extent_end;
1934 	u8 extent_type;
1935 	int can_nocow = 0;
1936 	int ret = 0;
1937 	bool nowait = path->nowait;
1938 
1939 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1940 	extent_type = btrfs_file_extent_type(leaf, fi);
1941 
1942 	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1943 		goto out;
1944 
1945 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1946 	    extent_type == BTRFS_FILE_EXTENT_REG)
1947 		goto out;
1948 
1949 	/*
1950 	 * If the extent was created before the generation where the last snapshot
1951 	 * for its subvolume was created, then this implies the extent is shared,
1952 	 * hence we must COW.
1953 	 */
1954 	if (btrfs_file_extent_generation(leaf, fi) <=
1955 	    btrfs_root_last_snapshot(&root->root_item))
1956 		goto out;
1957 
1958 	/* An explicit hole, must COW. */
1959 	if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
1960 		goto out;
1961 
1962 	/* Compressed/encrypted/encoded extents must be COWed. */
1963 	if (btrfs_file_extent_compression(leaf, fi) ||
1964 	    btrfs_file_extent_encryption(leaf, fi) ||
1965 	    btrfs_file_extent_other_encoding(leaf, fi))
1966 		goto out;
1967 
1968 	extent_end = btrfs_file_extent_end(path);
1969 
1970 	args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1971 	args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1972 	args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1973 	args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
1974 	args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);
1975 
1976 	/*
1977 	 * The following checks can be expensive, as they need to take other
1978 	 * locks and do btree or rbtree searches, so release the path to avoid
1979 	 * blocking other tasks for too long.
1980 	 */
1981 	btrfs_release_path(path);
1982 
1983 	ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
1984 				    args->file_extent.disk_bytenr, path);
1985 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1986 	if (ret != 0)
1987 		goto out;
1988 
1989 	if (args->free_path) {
1990 		/*
1991 		 * We don't need the path anymore, plus through the
1992 		 * btrfs_lookup_csums_list() call below we will end up allocating
1993 		 * another path. So free the path to avoid unnecessary extra
1994 		 * memory usage.
1995 		 */
1996 		btrfs_free_path(path);
1997 		path = NULL;
1998 	}
1999 
2000 	/* If there are pending snapshots for this root, we must COW. */
2001 	if (args->writeback_path && !is_freespace_inode &&
2002 	    atomic_read(&root->snapshot_force_cow))
2003 		goto out;
2004 
2005 	args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
2006 	args->file_extent.offset += args->start - key->offset;
2007 	io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
2008 
2009 	/*
2010 	 * Force COW if csums exist in the range. This ensures that csums for a
2011 	 * given extent are either valid or do not exist.
2012 	 */
2013 
2014 	csum_root = btrfs_csum_root(root->fs_info, io_start);
2015 	if (unlikely(!csum_root)) {
2016 		btrfs_err(root->fs_info,
2017 			  "missing csum root for extent at bytenr %llu", io_start);
2018 		ret = -EUCLEAN;
2019 		goto out;
2020 	}
2021 
2022 	ret = btrfs_lookup_csums_list(csum_root, io_start,
2023 				      io_start + args->file_extent.num_bytes - 1,
2024 				      NULL, nowait);
2025 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
2026 	if (ret != 0)
2027 		goto out;
2028 
2029 	can_nocow = 1;
2030  out:
2031 	if (args->free_path && path)
2032 		btrfs_free_path(path);
2033 
2034 	return ret < 0 ? ret : can_nocow;
2035 }
2036 
nocow_one_range(struct btrfs_inode * inode,struct folio * locked_folio,struct extent_state ** cached,struct can_nocow_file_extent_args * nocow_args,u64 file_pos,bool is_prealloc)2037 static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
2038 			   struct extent_state **cached,
2039 			   struct can_nocow_file_extent_args *nocow_args,
2040 			   u64 file_pos, bool is_prealloc)
2041 {
2042 	struct btrfs_ordered_extent *ordered;
2043 	const u64 len = nocow_args->file_extent.num_bytes;
2044 	const u64 end = file_pos + len - 1;
2045 	int ret = 0;
2046 
2047 	btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
2048 
2049 	if (is_prealloc) {
2050 		struct extent_map *em;
2051 
2052 		em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
2053 					BTRFS_ORDERED_PREALLOC);
2054 		if (IS_ERR(em)) {
2055 			ret = PTR_ERR(em);
2056 			goto error;
2057 		}
2058 		btrfs_free_extent_map(em);
2059 	}
2060 
2061 	ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
2062 					     is_prealloc
2063 					     ? (1U << BTRFS_ORDERED_PREALLOC)
2064 					     : (1U << BTRFS_ORDERED_NOCOW));
2065 	if (IS_ERR(ordered)) {
2066 		if (is_prealloc)
2067 			btrfs_drop_extent_map_range(inode, file_pos, end, false);
2068 		ret = PTR_ERR(ordered);
2069 		goto error;
2070 	}
2071 
2072 	if (btrfs_is_data_reloc_root(inode->root))
2073 		/*
2074 		 * Errors are handled later, as we must prevent
2075 		 * extent_clear_unlock_delalloc() in error handler from freeing
2076 		 * metadata of the created ordered extent.
2077 		 */
2078 		ret = btrfs_reloc_clone_csums(ordered);
2079 	btrfs_put_ordered_extent(ordered);
2080 
2081 	if (ret < 0)
2082 		goto error;
2083 	extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
2084 				     EXTENT_LOCKED | EXTENT_DELALLOC |
2085 				     EXTENT_CLEAR_DATA_RESV,
2086 				     PAGE_SET_ORDERED);
2087 	return ret;
2088 
2089 error:
2090 	btrfs_cleanup_ordered_extents(inode, file_pos, len);
2091 	extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
2092 				     EXTENT_LOCKED | EXTENT_DELALLOC |
2093 				     EXTENT_CLEAR_DATA_RESV,
2094 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
2095 				     PAGE_END_WRITEBACK);
2096 	btrfs_err(inode->root->fs_info,
2097 		  "%s failed, root=%lld inode=%llu start=%llu len=%llu: %d",
2098 		  __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
2099 		  file_pos, len, ret);
2100 	return ret;
2101 }
2102 
2103 /*
2104  * When nocow writeback calls back.  This checks for snapshots or COW copies
2105  * of the extents that exist in the file, and COWs the file as required.
2106  *
2107  * If no cow copies or snapshots exist, we write directly to the existing
2108  * blocks on disk
2109  */
run_delalloc_nocow(struct btrfs_inode * inode,struct folio * locked_folio,const u64 start,const u64 end)2110 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
2111 				       struct folio *locked_folio,
2112 				       const u64 start, const u64 end)
2113 {
2114 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2115 	struct btrfs_root *root = inode->root;
2116 	struct btrfs_path *path = NULL;
2117 	u64 cow_start = (u64)-1;
2118 	/*
2119 	 * If not 0, represents the inclusive end of the last fallback_to_cow()
2120 	 * range. Only for error handling.
2121 	 *
2122 	 * The same for nocow_end, it's to avoid double cleaning up the range
2123 	 * already cleaned by nocow_one_range().
2124 	 */
2125 	u64 cow_end = 0;
2126 	u64 nocow_end = 0;
2127 	u64 cur_offset = start;
2128 	int ret;
2129 	bool check_prev = true;
2130 	u64 ino = btrfs_ino(inode);
2131 	struct can_nocow_file_extent_args nocow_args = { 0 };
2132 	/* The range that has ordered extent(s). */
2133 	u64 oe_cleanup_start;
2134 	u64 oe_cleanup_len = 0;
2135 	/* The range that is untouched. */
2136 	u64 untouched_start;
2137 	u64 untouched_len = 0;
2138 
2139 	/*
2140 	 * Normally on a zoned device we're only doing COW writes, but in case
2141 	 * of relocation on a zoned filesystem serializes I/O so that we're only
2142 	 * writing sequentially and can end up here as well.
2143 	 */
2144 	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
2145 
2146 	if (btrfs_is_shutdown(fs_info)) {
2147 		ret = -EIO;
2148 		goto error;
2149 	}
2150 	path = btrfs_alloc_path();
2151 	if (!path) {
2152 		ret = -ENOMEM;
2153 		goto error;
2154 	}
2155 
2156 	nocow_args.end = end;
2157 	nocow_args.writeback_path = true;
2158 
2159 	while (cur_offset <= end) {
2160 		struct btrfs_block_group *nocow_bg = NULL;
2161 		struct btrfs_key found_key;
2162 		struct btrfs_file_extent_item *fi;
2163 		struct extent_buffer *leaf;
2164 		struct extent_state *cached_state = NULL;
2165 		u64 extent_end;
2166 		int extent_type;
2167 
2168 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2169 					       cur_offset, 0);
2170 		if (ret < 0)
2171 			goto error;
2172 
2173 		/*
2174 		 * If there is no extent for our range when doing the initial
2175 		 * search, then go back to the previous slot as it will be the
2176 		 * one containing the search offset
2177 		 */
2178 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
2179 			leaf = path->nodes[0];
2180 			btrfs_item_key_to_cpu(leaf, &found_key,
2181 					      path->slots[0] - 1);
2182 			if (found_key.objectid == ino &&
2183 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
2184 				path->slots[0]--;
2185 		}
2186 		check_prev = false;
2187 next_slot:
2188 		/* Go to next leaf if we have exhausted the current one */
2189 		leaf = path->nodes[0];
2190 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2191 			ret = btrfs_next_leaf(root, path);
2192 			if (ret < 0)
2193 				goto error;
2194 			if (ret > 0)
2195 				break;
2196 			leaf = path->nodes[0];
2197 		}
2198 
2199 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2200 
2201 		/* Didn't find anything for our INO */
2202 		if (found_key.objectid > ino)
2203 			break;
2204 		/*
2205 		 * Keep searching until we find an EXTENT_ITEM or there are no
2206 		 * more extents for this inode
2207 		 */
2208 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
2209 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
2210 			path->slots[0]++;
2211 			goto next_slot;
2212 		}
2213 
2214 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
2215 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2216 		    found_key.offset > end)
2217 			break;
2218 
2219 		/*
2220 		 * If the found extent starts after requested offset, then
2221 		 * adjust cur_offset to be right before this extent begins.
2222 		 */
2223 		if (found_key.offset > cur_offset) {
2224 			if (cow_start == (u64)-1)
2225 				cow_start = cur_offset;
2226 			cur_offset = found_key.offset;
2227 			goto next_slot;
2228 		}
2229 
2230 		/*
2231 		 * Found extent which begins before our range and potentially
2232 		 * intersect it
2233 		 */
2234 		fi = btrfs_item_ptr(leaf, path->slots[0],
2235 				    struct btrfs_file_extent_item);
2236 		extent_type = btrfs_file_extent_type(leaf, fi);
2237 		/* If this is triggered then we have a memory corruption. */
2238 		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2239 		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2240 			ret = -EUCLEAN;
2241 			goto error;
2242 		}
2243 		extent_end = btrfs_file_extent_end(path);
2244 
2245 		/*
2246 		 * If the extent we got ends before our current offset, skip to
2247 		 * the next extent.
2248 		 */
2249 		if (extent_end <= cur_offset) {
2250 			path->slots[0]++;
2251 			goto next_slot;
2252 		}
2253 
2254 		nocow_args.start = cur_offset;
2255 		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2256 		if (ret < 0)
2257 			goto error;
2258 		if (ret == 0)
2259 			goto must_cow;
2260 
2261 		ret = 0;
2262 		nocow_bg = btrfs_inc_nocow_writers(fs_info,
2263 				nocow_args.file_extent.disk_bytenr +
2264 				nocow_args.file_extent.offset);
2265 		if (!nocow_bg) {
2266 must_cow:
2267 			/*
2268 			 * If we can't perform NOCOW writeback for the range,
2269 			 * then record the beginning of the range that needs to
2270 			 * be COWed.  It will be written out before the next
2271 			 * NOCOW range if we find one, or when exiting this
2272 			 * loop.
2273 			 */
2274 			if (cow_start == (u64)-1)
2275 				cow_start = cur_offset;
2276 			cur_offset = extent_end;
2277 			if (cur_offset > end)
2278 				break;
2279 			if (!path->nodes[0])
2280 				continue;
2281 			path->slots[0]++;
2282 			goto next_slot;
2283 		}
2284 
2285 		/*
2286 		 * COW range from cow_start to found_key.offset - 1. As the key
2287 		 * will contain the beginning of the first extent that can be
2288 		 * NOCOW, following one which needs to be COW'ed
2289 		 */
2290 		if (cow_start != (u64)-1) {
2291 			ret = fallback_to_cow(inode, locked_folio, cow_start,
2292 					      found_key.offset - 1);
2293 			if (ret) {
2294 				cow_end = found_key.offset - 1;
2295 				btrfs_dec_nocow_writers(nocow_bg);
2296 				goto error;
2297 			}
2298 			cow_start = (u64)-1;
2299 		}
2300 
2301 		ret = nocow_one_range(inode, locked_folio, &cached_state,
2302 				      &nocow_args, cur_offset,
2303 				      extent_type == BTRFS_FILE_EXTENT_PREALLOC);
2304 		btrfs_dec_nocow_writers(nocow_bg);
2305 		if (ret < 0) {
2306 			nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
2307 			goto error;
2308 		}
2309 		cur_offset = extent_end;
2310 	}
2311 	btrfs_release_path(path);
2312 
2313 	if (cur_offset <= end && cow_start == (u64)-1)
2314 		cow_start = cur_offset;
2315 
2316 	if (cow_start != (u64)-1) {
2317 		ret = fallback_to_cow(inode, locked_folio, cow_start, end);
2318 		if (ret) {
2319 			cow_end = end;
2320 			goto error;
2321 		}
2322 		cow_start = (u64)-1;
2323 	}
2324 
2325 	/*
2326 	 * Everything is finished without an error, can unlock the folios now.
2327 	 *
2328 	 * No need to touch the io tree range nor set folio ordered flag, as
2329 	 * fallback_to_cow() and nocow_one_range() have already handled them.
2330 	 */
2331 	extent_clear_unlock_delalloc(inode, start, end, locked_folio, NULL, 0, PAGE_UNLOCK);
2332 
2333 	btrfs_free_path(path);
2334 	return 0;
2335 
2336 error:
2337 	if (cow_start == (u64)-1) {
2338 		/*
2339 		 * case a)
2340 		 *    start           cur_offset               end
2341 		 *    |   OE cleanup  |       Untouched        |
2342 		 *
2343 		 * We finished a fallback_to_cow() or nocow_one_range() call,
2344 		 * but failed to check the next range.
2345 		 *
2346 		 * or
2347 		 *    start           cur_offset   nocow_end   end
2348 		 *    |   OE cleanup  |   Skip     | Untouched |
2349 		 *
2350 		 * nocow_one_range() failed, the range [cur_offset, nocow_end] is
2351 		 * already cleaned up.
2352 		 */
2353 		oe_cleanup_start = start;
2354 		oe_cleanup_len = cur_offset - start;
2355 		if (nocow_end)
2356 			untouched_start = nocow_end + 1;
2357 		else
2358 			untouched_start = cur_offset;
2359 		untouched_len = end + 1 - untouched_start;
2360 	} else if (cow_start != (u64)-1 && cow_end == 0) {
2361 		/*
2362 		 * case b)
2363 		 *    start        cow_start    cur_offset   end
2364 		 *    | OE cleanup |        Untouched        |
2365 		 *
2366 		 * We got a range that needs COW, but before we hit the next NOCOW range,
2367 		 * thus [cow_start, cur_offset) doesn't yet have any OE.
2368 		 */
2369 		oe_cleanup_start = start;
2370 		oe_cleanup_len = cow_start - start;
2371 		untouched_start = cow_start;
2372 		untouched_len = end + 1 - untouched_start;
2373 	} else {
2374 		/*
2375 		 * case c)
2376 		 *    start        cow_start    cow_end      end
2377 		 *    | OE cleanup |   Skip     |  Untouched |
2378 		 *
2379 		 * fallback_to_cow() failed, and fallback_to_cow() will do the
2380 		 * cleanup for its range, we shouldn't touch the range
2381 		 * [cow_start, cow_end].
2382 		 */
2383 		ASSERT(cow_start != (u64)-1 && cow_end != 0);
2384 		oe_cleanup_start = start;
2385 		oe_cleanup_len = cow_start - start;
2386 		untouched_start = cow_end + 1;
2387 		untouched_len = end + 1 - untouched_start;
2388 	}
2389 
2390 	if (oe_cleanup_len) {
2391 		const u64 oe_cleanup_end = oe_cleanup_start + oe_cleanup_len - 1;
2392 		btrfs_cleanup_ordered_extents(inode, oe_cleanup_start, oe_cleanup_len);
2393 		extent_clear_unlock_delalloc(inode, oe_cleanup_start, oe_cleanup_end,
2394 					     locked_folio, NULL,
2395 					     EXTENT_LOCKED | EXTENT_DELALLOC,
2396 					     PAGE_UNLOCK | PAGE_START_WRITEBACK |
2397 					     PAGE_END_WRITEBACK);
2398 	}
2399 
2400 	if (untouched_len) {
2401 		struct extent_state *cached = NULL;
2402 		const u64 untouched_end = untouched_start + untouched_len - 1;
2403 
2404 		/*
2405 		 * We need to lock the extent here because we're clearing DELALLOC and
2406 		 * we're not locked at this point.
2407 		 */
2408 		btrfs_lock_extent(&inode->io_tree, untouched_start, untouched_end, &cached);
2409 		extent_clear_unlock_delalloc(inode, untouched_start, untouched_end,
2410 					     locked_folio, &cached,
2411 					     EXTENT_LOCKED | EXTENT_DELALLOC |
2412 					     EXTENT_DEFRAG |
2413 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2414 					     PAGE_START_WRITEBACK |
2415 					     PAGE_END_WRITEBACK);
2416 		btrfs_qgroup_free_data(inode, NULL, untouched_start, untouched_len, NULL);
2417 	}
2418 	btrfs_free_path(path);
2419 	btrfs_err(fs_info,
2420 "%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu oe_cleanup=%llu oe_cleanup_len=%llu untouched_start=%llu untouched_len=%llu: %d",
2421 		  __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
2422 		  start, end + 1 - start, cur_offset, oe_cleanup_start, oe_cleanup_len,
2423 		  untouched_start, untouched_len, ret);
2424 	return ret;
2425 }
2426 
should_nocow(struct btrfs_inode * inode,u64 start,u64 end)2427 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2428 {
2429 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2430 		if (inode->defrag_bytes &&
2431 		    btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
2432 			return false;
2433 		return true;
2434 	}
2435 	return false;
2436 }
2437 
2438 /*
2439  * Function to process delayed allocation (create CoW) for ranges which are
2440  * being touched for the first time.
2441  */
btrfs_run_delalloc_range(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,struct writeback_control * wbc)2442 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
2443 			     u64 start, u64 end, struct writeback_control *wbc)
2444 {
2445 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2446 
2447 	/*
2448 	 * The range must cover part of the @locked_folio, or a return of 1
2449 	 * can confuse the caller.
2450 	 */
2451 	ASSERT(!(end <= folio_pos(locked_folio) ||
2452 		 start >= folio_next_pos(locked_folio)));
2453 
2454 	if (should_nocow(inode, start, end))
2455 		return run_delalloc_nocow(inode, locked_folio, start, end);
2456 
2457 	if (btrfs_inode_can_compress(inode) &&
2458 	    inode_need_compress(inode, start, end) &&
2459 	    run_delalloc_compressed(inode, locked_folio, start, end, wbc))
2460 		return 1;
2461 
2462 	if (zoned)
2463 		return run_delalloc_cow(inode, locked_folio, start, end, wbc, true);
2464 	else
2465 		return cow_file_range(inode, locked_folio, start, end, NULL, 0);
2466 }
2467 
btrfs_split_delalloc_extent(struct btrfs_inode * inode,struct extent_state * orig,u64 split)2468 void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2469 				 struct extent_state *orig, u64 split)
2470 {
2471 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2472 	u64 size;
2473 
2474 	lockdep_assert_held(&inode->io_tree.lock);
2475 
2476 	/* not delalloc, ignore it */
2477 	if (!(orig->state & EXTENT_DELALLOC))
2478 		return;
2479 
2480 	size = orig->end - orig->start + 1;
2481 	if (size > fs_info->max_extent_size) {
2482 		u32 num_extents;
2483 		u64 new_size;
2484 
2485 		/*
2486 		 * See the explanation in btrfs_merge_delalloc_extent, the same
2487 		 * applies here, just in reverse.
2488 		 */
2489 		new_size = orig->end - split + 1;
2490 		num_extents = count_max_extents(fs_info, new_size);
2491 		new_size = split - orig->start;
2492 		num_extents += count_max_extents(fs_info, new_size);
2493 		if (count_max_extents(fs_info, size) >= num_extents)
2494 			return;
2495 	}
2496 
2497 	spin_lock(&inode->lock);
2498 	btrfs_mod_outstanding_extents(inode, 1);
2499 	spin_unlock(&inode->lock);
2500 }
2501 
2502 /*
2503  * Handle merged delayed allocation extents so we can keep track of new extents
2504  * that are just merged onto old extents, such as when we are doing sequential
2505  * writes, so we can properly account for the metadata space we'll need.
2506  */
btrfs_merge_delalloc_extent(struct btrfs_inode * inode,struct extent_state * new,struct extent_state * other)2507 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2508 				 struct extent_state *other)
2509 {
2510 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2511 	u64 new_size, old_size;
2512 	u32 num_extents;
2513 
2514 	lockdep_assert_held(&inode->io_tree.lock);
2515 
2516 	/* not delalloc, ignore it */
2517 	if (!(other->state & EXTENT_DELALLOC))
2518 		return;
2519 
2520 	if (new->start > other->start)
2521 		new_size = new->end - other->start + 1;
2522 	else
2523 		new_size = other->end - new->start + 1;
2524 
2525 	/* we're not bigger than the max, unreserve the space and go */
2526 	if (new_size <= fs_info->max_extent_size) {
2527 		spin_lock(&inode->lock);
2528 		btrfs_mod_outstanding_extents(inode, -1);
2529 		spin_unlock(&inode->lock);
2530 		return;
2531 	}
2532 
2533 	/*
2534 	 * We have to add up either side to figure out how many extents were
2535 	 * accounted for before we merged into one big extent.  If the number of
2536 	 * extents we accounted for is <= the amount we need for the new range
2537 	 * then we can return, otherwise drop.  Think of it like this
2538 	 *
2539 	 * [ 4k][MAX_SIZE]
2540 	 *
2541 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2542 	 * need 2 outstanding extents, on one side we have 1 and the other side
2543 	 * we have 1 so they are == and we can return.  But in this case
2544 	 *
2545 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2546 	 *
2547 	 * Each range on their own accounts for 2 extents, but merged together
2548 	 * they are only 3 extents worth of accounting, so we need to drop in
2549 	 * this case.
2550 	 */
2551 	old_size = other->end - other->start + 1;
2552 	num_extents = count_max_extents(fs_info, old_size);
2553 	old_size = new->end - new->start + 1;
2554 	num_extents += count_max_extents(fs_info, old_size);
2555 	if (count_max_extents(fs_info, new_size) >= num_extents)
2556 		return;
2557 
2558 	spin_lock(&inode->lock);
2559 	btrfs_mod_outstanding_extents(inode, -1);
2560 	spin_unlock(&inode->lock);
2561 }
2562 
btrfs_add_delalloc_inode(struct btrfs_inode * inode)2563 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
2564 {
2565 	struct btrfs_root *root = inode->root;
2566 	struct btrfs_fs_info *fs_info = root->fs_info;
2567 
2568 	spin_lock(&root->delalloc_lock);
2569 	ASSERT(list_empty(&inode->delalloc_inodes));
2570 	list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2571 	root->nr_delalloc_inodes++;
2572 	if (root->nr_delalloc_inodes == 1) {
2573 		spin_lock(&fs_info->delalloc_root_lock);
2574 		ASSERT(list_empty(&root->delalloc_root));
2575 		list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
2576 		spin_unlock(&fs_info->delalloc_root_lock);
2577 	}
2578 	spin_unlock(&root->delalloc_lock);
2579 }
2580 
btrfs_del_delalloc_inode(struct btrfs_inode * inode)2581 void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
2582 {
2583 	struct btrfs_root *root = inode->root;
2584 	struct btrfs_fs_info *fs_info = root->fs_info;
2585 
2586 	lockdep_assert_held(&root->delalloc_lock);
2587 
2588 	/*
2589 	 * We may be called after the inode was already deleted from the list,
2590 	 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
2591 	 * and then later through btrfs_clear_delalloc_extent() while the inode
2592 	 * still has ->delalloc_bytes > 0.
2593 	 */
2594 	if (!list_empty(&inode->delalloc_inodes)) {
2595 		list_del_init(&inode->delalloc_inodes);
2596 		root->nr_delalloc_inodes--;
2597 		if (!root->nr_delalloc_inodes) {
2598 			ASSERT(list_empty(&root->delalloc_inodes));
2599 			spin_lock(&fs_info->delalloc_root_lock);
2600 			ASSERT(!list_empty(&root->delalloc_root));
2601 			list_del_init(&root->delalloc_root);
2602 			spin_unlock(&fs_info->delalloc_root_lock);
2603 		}
2604 	}
2605 }
2606 
2607 /*
2608  * Properly track delayed allocation bytes in the inode and to maintain the
2609  * list of inodes that have pending delalloc work to be done.
2610  */
btrfs_set_delalloc_extent(struct btrfs_inode * inode,struct extent_state * state,u32 bits)2611 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2612 			       u32 bits)
2613 {
2614 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2615 
2616 	lockdep_assert_held(&inode->io_tree.lock);
2617 
2618 	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2619 		WARN_ON(1);
2620 	/*
2621 	 * set_bit and clear bit hooks normally require _irqsave/restore
2622 	 * but in this case, we are only testing for the DELALLOC
2623 	 * bit, which is only set or cleared with irqs on
2624 	 */
2625 	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2626 		u64 len = state->end + 1 - state->start;
2627 		u64 prev_delalloc_bytes;
2628 		u32 num_extents = count_max_extents(fs_info, len);
2629 
2630 		spin_lock(&inode->lock);
2631 		btrfs_mod_outstanding_extents(inode, num_extents);
2632 		spin_unlock(&inode->lock);
2633 
2634 		/* For sanity tests */
2635 		if (btrfs_is_testing(fs_info))
2636 			return;
2637 
2638 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2639 					 fs_info->delalloc_batch);
2640 		spin_lock(&inode->lock);
2641 		prev_delalloc_bytes = inode->delalloc_bytes;
2642 		inode->delalloc_bytes += len;
2643 		if (bits & EXTENT_DEFRAG)
2644 			inode->defrag_bytes += len;
2645 		spin_unlock(&inode->lock);
2646 
2647 		/*
2648 		 * We don't need to be under the protection of the inode's lock,
2649 		 * because we are called while holding the inode's io_tree lock
2650 		 * and are therefore protected against concurrent calls of this
2651 		 * function and btrfs_clear_delalloc_extent().
2652 		 */
2653 		if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
2654 			btrfs_add_delalloc_inode(inode);
2655 	}
2656 
2657 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2658 	    (bits & EXTENT_DELALLOC_NEW)) {
2659 		spin_lock(&inode->lock);
2660 		inode->new_delalloc_bytes += state->end + 1 - state->start;
2661 		spin_unlock(&inode->lock);
2662 	}
2663 }
2664 
2665 /*
2666  * Once a range is no longer delalloc this function ensures that proper
2667  * accounting happens.
2668  */
btrfs_clear_delalloc_extent(struct btrfs_inode * inode,struct extent_state * state,u32 bits)2669 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2670 				 struct extent_state *state, u32 bits)
2671 {
2672 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2673 	u64 len = state->end + 1 - state->start;
2674 	u32 num_extents = count_max_extents(fs_info, len);
2675 
2676 	lockdep_assert_held(&inode->io_tree.lock);
2677 
2678 	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2679 		spin_lock(&inode->lock);
2680 		inode->defrag_bytes -= len;
2681 		spin_unlock(&inode->lock);
2682 	}
2683 
2684 	/*
2685 	 * set_bit and clear bit hooks normally require _irqsave/restore
2686 	 * but in this case, we are only testing for the DELALLOC
2687 	 * bit, which is only set or cleared with irqs on
2688 	 */
2689 	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2690 		struct btrfs_root *root = inode->root;
2691 		u64 new_delalloc_bytes;
2692 
2693 		spin_lock(&inode->lock);
2694 		btrfs_mod_outstanding_extents(inode, -num_extents);
2695 		spin_unlock(&inode->lock);
2696 
2697 		/*
2698 		 * We don't reserve metadata space for space cache inodes so we
2699 		 * don't need to call delalloc_release_metadata if there is an
2700 		 * error.
2701 		 */
2702 		if (bits & EXTENT_CLEAR_META_RESV &&
2703 		    root != fs_info->tree_root)
2704 			btrfs_delalloc_release_metadata(inode, len, true);
2705 
2706 		/* For sanity tests. */
2707 		if (btrfs_is_testing(fs_info))
2708 			return;
2709 
2710 		if (!btrfs_is_data_reloc_root(root) &&
2711 		    !btrfs_is_free_space_inode(inode) &&
2712 		    !(state->state & EXTENT_NORESERVE) &&
2713 		    (bits & EXTENT_CLEAR_DATA_RESV))
2714 			btrfs_free_reserved_data_space_noquota(inode, len);
2715 
2716 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2717 					 fs_info->delalloc_batch);
2718 		spin_lock(&inode->lock);
2719 		inode->delalloc_bytes -= len;
2720 		new_delalloc_bytes = inode->delalloc_bytes;
2721 		spin_unlock(&inode->lock);
2722 
2723 		/*
2724 		 * We don't need to be under the protection of the inode's lock,
2725 		 * because we are called while holding the inode's io_tree lock
2726 		 * and are therefore protected against concurrent calls of this
2727 		 * function and btrfs_set_delalloc_extent().
2728 		 */
2729 		if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
2730 			spin_lock(&root->delalloc_lock);
2731 			btrfs_del_delalloc_inode(inode);
2732 			spin_unlock(&root->delalloc_lock);
2733 		}
2734 	}
2735 
2736 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2737 	    (bits & EXTENT_DELALLOC_NEW)) {
2738 		spin_lock(&inode->lock);
2739 		ASSERT(inode->new_delalloc_bytes >= len);
2740 		inode->new_delalloc_bytes -= len;
2741 		if (bits & EXTENT_ADD_INODE_BYTES)
2742 			inode_add_bytes(&inode->vfs_inode, len);
2743 		spin_unlock(&inode->lock);
2744 	}
2745 }
2746 
2747 /*
2748  * given a list of ordered sums record them in the inode.  This happens
2749  * at IO completion time based on sums calculated at bio submission time.
2750  */
add_pending_csums(struct btrfs_trans_handle * trans,struct list_head * list)2751 static int add_pending_csums(struct btrfs_trans_handle *trans,
2752 			     struct list_head *list)
2753 {
2754 	struct btrfs_ordered_sum *sum;
2755 	struct btrfs_root *csum_root = NULL;
2756 	int ret;
2757 
2758 	list_for_each_entry(sum, list, list) {
2759 		if (!csum_root) {
2760 			csum_root = btrfs_csum_root(trans->fs_info,
2761 						    sum->logical);
2762 			if (unlikely(!csum_root)) {
2763 				btrfs_err(trans->fs_info,
2764 				  "missing csum root for extent at bytenr %llu",
2765 					  sum->logical);
2766 				return -EUCLEAN;
2767 			}
2768 		}
2769 		trans->adding_csums = true;
2770 		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2771 		trans->adding_csums = false;
2772 		if (ret)
2773 			return ret;
2774 	}
2775 	return 0;
2776 }
2777 
btrfs_find_new_delalloc_bytes(struct btrfs_inode * inode,const u64 start,const u64 len,struct extent_state ** cached_state)2778 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2779 					 const u64 start,
2780 					 const u64 len,
2781 					 struct extent_state **cached_state)
2782 {
2783 	u64 search_start = start;
2784 	const u64 end = start + len - 1;
2785 
2786 	while (search_start < end) {
2787 		const u64 search_len = end - search_start + 1;
2788 		struct extent_map *em;
2789 		u64 em_len;
2790 		int ret = 0;
2791 
2792 		em = btrfs_get_extent(inode, NULL, search_start, search_len);
2793 		if (IS_ERR(em))
2794 			return PTR_ERR(em);
2795 
2796 		if (em->disk_bytenr != EXTENT_MAP_HOLE)
2797 			goto next;
2798 
2799 		em_len = em->len;
2800 		if (em->start < search_start)
2801 			em_len -= search_start - em->start;
2802 		if (em_len > search_len)
2803 			em_len = search_len;
2804 
2805 		ret = btrfs_set_extent_bit(&inode->io_tree, search_start,
2806 					   search_start + em_len - 1,
2807 					   EXTENT_DELALLOC_NEW, cached_state);
2808 next:
2809 		search_start = btrfs_extent_map_end(em);
2810 		btrfs_free_extent_map(em);
2811 		if (ret)
2812 			return ret;
2813 	}
2814 	return 0;
2815 }
2816 
btrfs_set_extent_delalloc(struct btrfs_inode * inode,u64 start,u64 end,unsigned int extra_bits,struct extent_state ** cached_state)2817 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2818 			      unsigned int extra_bits,
2819 			      struct extent_state **cached_state)
2820 {
2821 	WARN_ON(PAGE_ALIGNED(end));
2822 
2823 	if (start >= i_size_read(&inode->vfs_inode) &&
2824 	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2825 		/*
2826 		 * There can't be any extents following eof in this case so just
2827 		 * set the delalloc new bit for the range directly.
2828 		 */
2829 		extra_bits |= EXTENT_DELALLOC_NEW;
2830 	} else {
2831 		int ret;
2832 
2833 		ret = btrfs_find_new_delalloc_bytes(inode, start,
2834 						    end + 1 - start,
2835 						    cached_state);
2836 		if (ret)
2837 			return ret;
2838 	}
2839 
2840 	return btrfs_set_extent_bit(&inode->io_tree, start, end,
2841 				    EXTENT_DELALLOC | extra_bits, cached_state);
2842 }
2843 
2844 /* see btrfs_writepage_start_hook for details on why this is required */
2845 struct btrfs_writepage_fixup {
2846 	struct folio *folio;
2847 	struct btrfs_inode *inode;
2848 	struct btrfs_work work;
2849 };
2850 
btrfs_writepage_fixup_worker(struct btrfs_work * work)2851 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2852 {
2853 	struct btrfs_writepage_fixup *fixup =
2854 		container_of(work, struct btrfs_writepage_fixup, work);
2855 	struct btrfs_ordered_extent *ordered;
2856 	struct extent_state *cached_state = NULL;
2857 	struct extent_changeset *data_reserved = NULL;
2858 	struct folio *folio = fixup->folio;
2859 	struct btrfs_inode *inode = fixup->inode;
2860 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2861 	u64 page_start = folio_pos(folio);
2862 	u64 page_end = folio_next_pos(folio) - 1;
2863 	int ret = 0;
2864 	bool free_delalloc_space = true;
2865 
2866 	/*
2867 	 * This is similar to page_mkwrite, we need to reserve the space before
2868 	 * we take the folio lock.
2869 	 */
2870 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2871 					   folio_size(folio));
2872 again:
2873 	folio_lock(folio);
2874 
2875 	/*
2876 	 * Before we queued this fixup, we took a reference on the folio.
2877 	 * folio->mapping may go NULL, but it shouldn't be moved to a different
2878 	 * address space.
2879 	 */
2880 	if (!folio->mapping || !folio_test_dirty(folio) ||
2881 	    !folio_test_checked(folio)) {
2882 		/*
2883 		 * Unfortunately this is a little tricky, either
2884 		 *
2885 		 * 1) We got here and our folio had already been dealt with and
2886 		 *    we reserved our space, thus ret == 0, so we need to just
2887 		 *    drop our space reservation and bail.  This can happen the
2888 		 *    first time we come into the fixup worker, or could happen
2889 		 *    while waiting for the ordered extent.
2890 		 * 2) Our folio was already dealt with, but we happened to get an
2891 		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2892 		 *    this case we obviously don't have anything to release, but
2893 		 *    because the folio was already dealt with we don't want to
2894 		 *    mark the folio with an error, so make sure we're resetting
2895 		 *    ret to 0.  This is why we have this check _before_ the ret
2896 		 *    check, because we do not want to have a surprise ENOSPC
2897 		 *    when the folio was already properly dealt with.
2898 		 */
2899 		if (!ret) {
2900 			btrfs_delalloc_release_extents(inode, folio_size(folio));
2901 			btrfs_delalloc_release_space(inode, data_reserved,
2902 						     page_start, folio_size(folio),
2903 						     true);
2904 		}
2905 		ret = 0;
2906 		goto out_page;
2907 	}
2908 
2909 	/*
2910 	 * We can't mess with the folio state unless it is locked, so now that
2911 	 * it is locked bail if we failed to make our space reservation.
2912 	 */
2913 	if (ret)
2914 		goto out_page;
2915 
2916 	btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2917 
2918 	/* already ordered? We're done */
2919 	if (folio_test_ordered(folio))
2920 		goto out_reserved;
2921 
2922 	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2923 	if (ordered) {
2924 		btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
2925 				    &cached_state);
2926 		folio_unlock(folio);
2927 		btrfs_start_ordered_extent(ordered);
2928 		btrfs_put_ordered_extent(ordered);
2929 		goto again;
2930 	}
2931 
2932 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2933 					&cached_state);
2934 	if (ret)
2935 		goto out_reserved;
2936 
2937 	/*
2938 	 * Everything went as planned, we're now the owner of a dirty page with
2939 	 * delayed allocation bits set and space reserved for our COW
2940 	 * destination.
2941 	 *
2942 	 * The page was dirty when we started, nothing should have cleaned it.
2943 	 */
2944 	BUG_ON(!folio_test_dirty(folio));
2945 	free_delalloc_space = false;
2946 out_reserved:
2947 	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2948 	if (free_delalloc_space)
2949 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2950 					     PAGE_SIZE, true);
2951 	btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2952 out_page:
2953 	if (ret) {
2954 		/*
2955 		 * We hit ENOSPC or other errors.  Update the mapping and page
2956 		 * to reflect the errors and clean the page.
2957 		 */
2958 		mapping_set_error(folio->mapping, ret);
2959 		btrfs_mark_ordered_io_finished(inode, folio, page_start,
2960 					       folio_size(folio), !ret);
2961 		folio_clear_dirty_for_io(folio);
2962 	}
2963 	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
2964 	folio_unlock(folio);
2965 	folio_put(folio);
2966 	kfree(fixup);
2967 	extent_changeset_free(data_reserved);
2968 	/*
2969 	 * As a precaution, do a delayed iput in case it would be the last iput
2970 	 * that could need flushing space. Recursing back to fixup worker would
2971 	 * deadlock.
2972 	 */
2973 	btrfs_add_delayed_iput(inode);
2974 }
2975 
2976 /*
2977  * There are a few paths in the higher layers of the kernel that directly
2978  * set the folio dirty bit without asking the filesystem if it is a
2979  * good idea.  This causes problems because we want to make sure COW
2980  * properly happens and the data=ordered rules are followed.
2981  *
2982  * In our case any range that doesn't have the ORDERED bit set
2983  * hasn't been properly setup for IO.  We kick off an async process
2984  * to fix it up.  The async helper will wait for ordered extents, set
2985  * the delalloc bit and make it safe to write the folio.
2986  */
btrfs_writepage_cow_fixup(struct folio * folio)2987 int btrfs_writepage_cow_fixup(struct folio *folio)
2988 {
2989 	struct inode *inode = folio->mapping->host;
2990 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2991 	struct btrfs_writepage_fixup *fixup;
2992 
2993 	/* This folio has ordered extent covering it already */
2994 	if (folio_test_ordered(folio))
2995 		return 0;
2996 
2997 	/*
2998 	 * For experimental build, we error out instead of EAGAIN.
2999 	 *
3000 	 * We should not hit such out-of-band dirty folios anymore.
3001 	 */
3002 	if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
3003 		DEBUG_WARN();
3004 		btrfs_err_rl(fs_info,
3005 	"root %lld ino %llu folio %llu is marked dirty without notifying the fs",
3006 			     btrfs_root_id(BTRFS_I(inode)->root),
3007 			     btrfs_ino(BTRFS_I(inode)),
3008 			     folio_pos(folio));
3009 		return -EUCLEAN;
3010 	}
3011 
3012 	/*
3013 	 * folio_checked is set below when we create a fixup worker for this
3014 	 * folio, don't try to create another one if we're already
3015 	 * folio_test_checked.
3016 	 *
3017 	 * The extent_io writepage code will redirty the foio if we send back
3018 	 * EAGAIN.
3019 	 */
3020 	if (folio_test_checked(folio))
3021 		return -EAGAIN;
3022 
3023 	fixup = kzalloc_obj(*fixup, GFP_NOFS);
3024 	if (!fixup)
3025 		return -EAGAIN;
3026 
3027 	/*
3028 	 * We are already holding a reference to this inode from
3029 	 * write_cache_pages.  We need to hold it because the space reservation
3030 	 * takes place outside of the folio lock, and we can't trust
3031 	 * folio->mapping outside of the folio lock.
3032 	 */
3033 	ihold(inode);
3034 	btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
3035 	folio_get(folio);
3036 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
3037 	fixup->folio = folio;
3038 	fixup->inode = BTRFS_I(inode);
3039 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
3040 
3041 	return -EAGAIN;
3042 }
3043 
insert_reserved_file_extent(struct btrfs_trans_handle * trans,struct btrfs_inode * inode,u64 file_pos,struct btrfs_file_extent_item * stack_fi,const bool update_inode_bytes,u64 qgroup_reserved)3044 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
3045 				       struct btrfs_inode *inode, u64 file_pos,
3046 				       struct btrfs_file_extent_item *stack_fi,
3047 				       const bool update_inode_bytes,
3048 				       u64 qgroup_reserved)
3049 {
3050 	struct btrfs_root *root = inode->root;
3051 	const u64 sectorsize = root->fs_info->sectorsize;
3052 	BTRFS_PATH_AUTO_FREE(path);
3053 	struct extent_buffer *leaf;
3054 	struct btrfs_key ins;
3055 	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
3056 	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
3057 	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
3058 	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
3059 	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
3060 	struct btrfs_drop_extents_args drop_args = { 0 };
3061 	int ret;
3062 
3063 	path = btrfs_alloc_path();
3064 	if (!path)
3065 		return -ENOMEM;
3066 
3067 	/*
3068 	 * we may be replacing one extent in the tree with another.
3069 	 * The new extent is pinned in the extent map, and we don't want
3070 	 * to drop it from the cache until it is completely in the btree.
3071 	 *
3072 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
3073 	 * the caller is expected to unpin it and allow it to be merged
3074 	 * with the others.
3075 	 */
3076 	drop_args.path = path;
3077 	drop_args.start = file_pos;
3078 	drop_args.end = file_pos + num_bytes;
3079 	drop_args.replace_extent = true;
3080 	drop_args.extent_item_size = sizeof(*stack_fi);
3081 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
3082 	if (ret)
3083 		return ret;
3084 
3085 	if (!drop_args.extent_inserted) {
3086 		ins.objectid = btrfs_ino(inode);
3087 		ins.type = BTRFS_EXTENT_DATA_KEY;
3088 		ins.offset = file_pos;
3089 
3090 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
3091 					      sizeof(*stack_fi));
3092 		if (ret)
3093 			return ret;
3094 	}
3095 	leaf = path->nodes[0];
3096 	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
3097 	write_extent_buffer(leaf, stack_fi,
3098 			btrfs_item_ptr_offset(leaf, path->slots[0]),
3099 			sizeof(struct btrfs_file_extent_item));
3100 
3101 	btrfs_release_path(path);
3102 
3103 	/*
3104 	 * If we dropped an inline extent here, we know the range where it is
3105 	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
3106 	 * number of bytes only for that range containing the inline extent.
3107 	 * The remaining of the range will be processed when clearing the
3108 	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
3109 	 */
3110 	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
3111 		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
3112 
3113 		inline_size = drop_args.bytes_found - inline_size;
3114 		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
3115 		drop_args.bytes_found -= inline_size;
3116 		num_bytes -= sectorsize;
3117 	}
3118 
3119 	if (update_inode_bytes)
3120 		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
3121 
3122 	ins.objectid = disk_bytenr;
3123 	ins.type = BTRFS_EXTENT_ITEM_KEY;
3124 	ins.offset = disk_num_bytes;
3125 
3126 	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
3127 	if (ret)
3128 		return ret;
3129 
3130 	return btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
3131 						file_pos - offset,
3132 						qgroup_reserved, &ins);
3133 }
3134 
btrfs_release_delalloc_bytes(struct btrfs_fs_info * fs_info,u64 start,u64 len)3135 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3136 					 u64 start, u64 len)
3137 {
3138 	struct btrfs_block_group *cache;
3139 
3140 	cache = btrfs_lookup_block_group(fs_info, start);
3141 	ASSERT(cache);
3142 
3143 	spin_lock(&cache->lock);
3144 	cache->delalloc_bytes -= len;
3145 	spin_unlock(&cache->lock);
3146 
3147 	btrfs_put_block_group(cache);
3148 }
3149 
insert_ordered_extent_file_extent(struct btrfs_trans_handle * trans,struct btrfs_ordered_extent * oe)3150 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3151 					     struct btrfs_ordered_extent *oe)
3152 {
3153 	struct btrfs_file_extent_item stack_fi;
3154 	bool update_inode_bytes;
3155 	u64 num_bytes = oe->num_bytes;
3156 	u64 ram_bytes = oe->ram_bytes;
3157 
3158 	memset(&stack_fi, 0, sizeof(stack_fi));
3159 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3160 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3161 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3162 						   oe->disk_num_bytes);
3163 	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3164 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
3165 		num_bytes = oe->truncated_len;
3166 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3167 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3168 	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3169 	/* Encryption and other encoding is reserved and all 0 */
3170 
3171 	/*
3172 	 * For delalloc, when completing an ordered extent we update the inode's
3173 	 * bytes when clearing the range in the inode's io tree, so pass false
3174 	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3175 	 * except if the ordered extent was truncated.
3176 	 */
3177 	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3178 			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3179 			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3180 
3181 	return insert_reserved_file_extent(trans, oe->inode,
3182 					   oe->file_offset, &stack_fi,
3183 					   update_inode_bytes, oe->qgroup_rsv);
3184 }
3185 
3186 /*
3187  * As ordered data IO finishes, this gets called so we can finish
3188  * an ordered extent if the range of bytes in the file it covers are
3189  * fully written.
3190  */
btrfs_finish_one_ordered(struct btrfs_ordered_extent * ordered_extent)3191 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3192 {
3193 	struct btrfs_inode *inode = ordered_extent->inode;
3194 	struct btrfs_root *root = inode->root;
3195 	struct btrfs_fs_info *fs_info = root->fs_info;
3196 	struct btrfs_trans_handle *trans = NULL;
3197 	struct extent_io_tree *io_tree = &inode->io_tree;
3198 	struct extent_state *cached_state = NULL;
3199 	u64 start, end;
3200 	int compress_type = 0;
3201 	int ret = 0;
3202 	u64 logical_len = ordered_extent->num_bytes;
3203 	bool freespace_inode;
3204 	bool truncated = false;
3205 	bool clear_reserved_extent = true;
3206 	unsigned int clear_bits = EXTENT_DEFRAG;
3207 
3208 	start = ordered_extent->file_offset;
3209 	end = start + ordered_extent->num_bytes - 1;
3210 
3211 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3212 	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3213 	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3214 	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3215 		clear_bits |= EXTENT_DELALLOC_NEW;
3216 
3217 	freespace_inode = btrfs_is_free_space_inode(inode);
3218 	if (!freespace_inode)
3219 		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3220 
3221 	if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) {
3222 		ret = -EIO;
3223 		goto out;
3224 	}
3225 
3226 	ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3227 				      ordered_extent->disk_num_bytes);
3228 	if (ret)
3229 		goto out;
3230 
3231 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3232 		truncated = true;
3233 		logical_len = ordered_extent->truncated_len;
3234 		/* Truncated the entire extent, don't bother adding */
3235 		if (!logical_len)
3236 			goto out;
3237 	}
3238 
3239 	/*
3240 	 * If it's a COW write we need to lock the extent range as we will be
3241 	 * inserting/replacing file extent items and unpinning an extent map.
3242 	 * This must be taken before joining a transaction, as it's a higher
3243 	 * level lock (like the inode's VFS lock), otherwise we can run into an
3244 	 * ABBA deadlock with other tasks (transactions work like a lock,
3245 	 * depending on their current state).
3246 	 */
3247 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3248 		clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED;
3249 		btrfs_lock_extent_bits(io_tree, start, end,
3250 				       EXTENT_LOCKED | EXTENT_FINISHING_ORDERED,
3251 				       &cached_state);
3252 	}
3253 
3254 	if (freespace_inode)
3255 		trans = btrfs_join_transaction_spacecache(root);
3256 	else
3257 		trans = btrfs_join_transaction(root);
3258 	if (IS_ERR(trans)) {
3259 		ret = PTR_ERR(trans);
3260 		trans = NULL;
3261 		goto out;
3262 	}
3263 
3264 	trans->block_rsv = &inode->block_rsv;
3265 
3266 	ret = btrfs_insert_raid_extent(trans, ordered_extent);
3267 	if (unlikely(ret)) {
3268 		btrfs_abort_transaction(trans, ret);
3269 		goto out;
3270 	}
3271 
3272 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3273 		/* Logic error */
3274 		ASSERT(list_empty(&ordered_extent->list));
3275 		if (unlikely(!list_empty(&ordered_extent->list))) {
3276 			ret = -EINVAL;
3277 			btrfs_abort_transaction(trans, ret);
3278 			goto out;
3279 		}
3280 
3281 		btrfs_inode_safe_disk_i_size_write(inode, 0);
3282 		ret = btrfs_update_inode_fallback(trans, inode);
3283 		if (unlikely(ret)) {
3284 			/* -ENOMEM or corruption */
3285 			btrfs_abort_transaction(trans, ret);
3286 		}
3287 		goto out;
3288 	}
3289 
3290 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3291 		compress_type = ordered_extent->compress_type;
3292 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3293 		BUG_ON(compress_type);
3294 		ret = btrfs_mark_extent_written(trans, inode,
3295 						ordered_extent->file_offset,
3296 						ordered_extent->file_offset +
3297 						logical_len);
3298 		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3299 						  ordered_extent->disk_num_bytes);
3300 		if (unlikely(ret < 0)) {
3301 			btrfs_abort_transaction(trans, ret);
3302 			goto out;
3303 		}
3304 	} else {
3305 		BUG_ON(root == fs_info->tree_root);
3306 		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3307 		if (unlikely(ret < 0)) {
3308 			btrfs_abort_transaction(trans, ret);
3309 			goto out;
3310 		}
3311 		clear_reserved_extent = false;
3312 		btrfs_release_delalloc_bytes(fs_info,
3313 					     ordered_extent->disk_bytenr,
3314 					     ordered_extent->disk_num_bytes);
3315 	}
3316 
3317 	ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset,
3318 				       ordered_extent->num_bytes, trans->transid);
3319 	if (unlikely(ret < 0)) {
3320 		btrfs_abort_transaction(trans, ret);
3321 		goto out;
3322 	}
3323 
3324 	ret = add_pending_csums(trans, &ordered_extent->list);
3325 	if (unlikely(ret)) {
3326 		btrfs_abort_transaction(trans, ret);
3327 		goto out;
3328 	}
3329 
3330 	/*
3331 	 * If this is a new delalloc range, clear its new delalloc flag to
3332 	 * update the inode's number of bytes. This needs to be done first
3333 	 * before updating the inode item.
3334 	 */
3335 	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3336 	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3337 		btrfs_clear_extent_bit(&inode->io_tree, start, end,
3338 				       EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3339 				       &cached_state);
3340 
3341 	btrfs_inode_safe_disk_i_size_write(inode, 0);
3342 	ret = btrfs_update_inode_fallback(trans, inode);
3343 	if (unlikely(ret)) { /* -ENOMEM or corruption */
3344 		btrfs_abort_transaction(trans, ret);
3345 		goto out;
3346 	}
3347 out:
3348 	btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3349 			       &cached_state);
3350 
3351 	if (trans)
3352 		btrfs_end_transaction(trans);
3353 
3354 	if (ret || truncated) {
3355 		/*
3356 		 * If we failed to finish this ordered extent for any reason we
3357 		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3358 		 * extent, and mark the inode with the error if it wasn't
3359 		 * already set.  Any error during writeback would have already
3360 		 * set the mapping error, so we need to set it if we're the ones
3361 		 * marking this ordered extent as failed.
3362 		 */
3363 		if (ret)
3364 			btrfs_mark_ordered_extent_error(ordered_extent);
3365 
3366 		/*
3367 		 * Drop extent maps for the part of the extent we didn't write.
3368 		 *
3369 		 * We have an exception here for the free_space_inode, this is
3370 		 * because when we do btrfs_get_extent() on the free space inode
3371 		 * we will search the commit root.  If this is a new block group
3372 		 * we won't find anything, and we will trip over the assert in
3373 		 * writepage where we do ASSERT(em->block_start !=
3374 		 * EXTENT_MAP_HOLE).
3375 		 *
3376 		 * Theoretically we could also skip this for any NOCOW extent as
3377 		 * we don't mess with the extent map tree in the NOCOW case, but
3378 		 * for now simply skip this if we are the free space inode.
3379 		 */
3380 		if (!btrfs_is_free_space_inode(inode)) {
3381 			u64 unwritten_start = start;
3382 
3383 			if (truncated)
3384 				unwritten_start += logical_len;
3385 
3386 			btrfs_drop_extent_map_range(inode, unwritten_start,
3387 						    end, false);
3388 		}
3389 
3390 		/*
3391 		 * If the ordered extent had an IOERR or something else went
3392 		 * wrong we need to return the space for this ordered extent
3393 		 * back to the allocator.  We only free the extent in the
3394 		 * truncated case if we didn't write out the extent at all.
3395 		 *
3396 		 * If we made it past insert_reserved_file_extent before we
3397 		 * errored out then we don't need to do this as the accounting
3398 		 * has already been done.
3399 		 */
3400 		if ((ret || !logical_len) &&
3401 		    clear_reserved_extent &&
3402 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3403 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3404 			/*
3405 			 * Discard the range before returning it back to the
3406 			 * free space pool
3407 			 */
3408 			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3409 				btrfs_discard_extent(fs_info,
3410 						ordered_extent->disk_bytenr,
3411 						ordered_extent->disk_num_bytes,
3412 						NULL, true);
3413 			btrfs_free_reserved_extent(fs_info,
3414 					ordered_extent->disk_bytenr,
3415 					ordered_extent->disk_num_bytes, true);
3416 			/*
3417 			 * Actually free the qgroup rsv which was released when
3418 			 * the ordered extent was created.
3419 			 */
3420 			btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
3421 						  ordered_extent->qgroup_rsv,
3422 						  BTRFS_QGROUP_RSV_DATA);
3423 		}
3424 	}
3425 
3426 	/*
3427 	 * This needs to be done to make sure anybody waiting knows we are done
3428 	 * updating everything for this ordered extent.
3429 	 */
3430 	btrfs_remove_ordered_extent(inode, ordered_extent);
3431 
3432 	/* once for us */
3433 	btrfs_put_ordered_extent(ordered_extent);
3434 	/* once for the tree */
3435 	btrfs_put_ordered_extent(ordered_extent);
3436 
3437 	return ret;
3438 }
3439 
btrfs_finish_ordered_io(struct btrfs_ordered_extent * ordered)3440 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3441 {
3442 	if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
3443 	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3444 	    list_empty(&ordered->bioc_list))
3445 		btrfs_finish_ordered_zoned(ordered);
3446 	return btrfs_finish_one_ordered(ordered);
3447 }
3448 
3449 /*
3450  * Calculate the checksum of an fs block at physical memory address @paddr,
3451  * and save the result to @dest.
3452  *
3453  * The folio containing @paddr must be large enough to contain a full fs block.
3454  */
btrfs_calculate_block_csum_folio(struct btrfs_fs_info * fs_info,const phys_addr_t paddr,u8 * dest)3455 void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
3456 				      const phys_addr_t paddr, u8 *dest)
3457 {
3458 	struct folio *folio = page_folio(phys_to_page(paddr));
3459 	const u32 blocksize = fs_info->sectorsize;
3460 	const u32 step = min(blocksize, PAGE_SIZE);
3461 	const u32 nr_steps = blocksize / step;
3462 	phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
3463 
3464 	/* The full block must be inside the folio. */
3465 	ASSERT(offset_in_folio(folio, paddr) + blocksize <= folio_size(folio));
3466 
3467 	for (int i = 0; i < nr_steps; i++) {
3468 		u32 pindex = offset_in_folio(folio, paddr + i * step) >> PAGE_SHIFT;
3469 
3470 		/*
3471 		 * For bs <= ps cases, we will only run the loop once, so the offset
3472 		 * inside the page will only added to paddrs[0].
3473 		 *
3474 		 * For bs > ps cases, the block must be page aligned, thus offset
3475 		 * inside the page will always be 0.
3476 		 */
3477 		paddrs[i] = page_to_phys(folio_page(folio, pindex)) + offset_in_page(paddr);
3478 	}
3479 	return btrfs_calculate_block_csum_pages(fs_info, paddrs, dest);
3480 }
3481 
3482 /*
3483  * Calculate the checksum of a fs block backed by multiple noncontiguous pages
3484  * at @paddrs[] and save the result to @dest.
3485  *
3486  * The folio containing @paddr must be large enough to contain a full fs block.
3487  */
btrfs_calculate_block_csum_pages(struct btrfs_fs_info * fs_info,const phys_addr_t paddrs[],u8 * dest)3488 void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
3489 				      const phys_addr_t paddrs[], u8 *dest)
3490 {
3491 	const u32 blocksize = fs_info->sectorsize;
3492 	const u32 step = min(blocksize, PAGE_SIZE);
3493 	const u32 nr_steps = blocksize / step;
3494 	struct btrfs_csum_ctx csum;
3495 
3496 	btrfs_csum_init(&csum, fs_info->csum_type);
3497 	for (int i = 0; i < nr_steps; i++) {
3498 		const phys_addr_t paddr = paddrs[i];
3499 		void *kaddr;
3500 
3501 		ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
3502 		kaddr = kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
3503 		btrfs_csum_update(&csum, kaddr, step);
3504 		kunmap_local(kaddr);
3505 	}
3506 	btrfs_csum_final(&csum, dest);
3507 }
3508 
3509 /*
3510  * Verify the checksum for a single sector without any extra action that depend
3511  * on the type of I/O.
3512  *
3513  * @kaddr must be a properly kmapped address.
3514  */
btrfs_check_block_csum(struct btrfs_fs_info * fs_info,phys_addr_t paddr,u8 * csum,const u8 * const csum_expected)3515 int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
3516 			   const u8 * const csum_expected)
3517 {
3518 	btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
3519 	if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
3520 		return -EIO;
3521 	return 0;
3522 }
3523 
3524 /*
3525  * Verify the checksum of a single data sector, which can be scattered at
3526  * different noncontiguous pages.
3527  *
3528  * @bbio:	btrfs_io_bio which contains the csum
3529  * @dev:	device the sector is on
3530  * @bio_offset:	offset to the beginning of the bio (in bytes)
3531  * @paddrs:	physical addresses which back the fs block
3532  *
3533  * Check if the checksum on a data block is valid.  When a checksum mismatch is
3534  * detected, report the error and fill the corrupted range with zero.
3535  *
3536  * Return %true if the sector is ok or had no checksum to start with, else %false.
3537  */
btrfs_data_csum_ok(struct btrfs_bio * bbio,struct btrfs_device * dev,u32 bio_offset,const phys_addr_t paddrs[])3538 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
3539 			u32 bio_offset, const phys_addr_t paddrs[])
3540 {
3541 	struct btrfs_inode *inode = bbio->inode;
3542 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3543 	const u32 blocksize = fs_info->sectorsize;
3544 	const u32 step = min(blocksize, PAGE_SIZE);
3545 	const u32 nr_steps = blocksize / step;
3546 	u64 file_offset = bbio->file_offset + bio_offset;
3547 	u64 end = file_offset + blocksize - 1;
3548 	u8 *csum_expected;
3549 	u8 csum[BTRFS_CSUM_SIZE];
3550 
3551 	if (!bbio->csum)
3552 		return true;
3553 
3554 	if (btrfs_is_data_reloc_root(inode->root) &&
3555 	    btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3556 				 NULL)) {
3557 		/* Skip the range without csum for data reloc inode */
3558 		btrfs_clear_extent_bit(&inode->io_tree, file_offset, end,
3559 				       EXTENT_NODATASUM, NULL);
3560 		return true;
3561 	}
3562 
3563 	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
3564 				fs_info->csum_size;
3565 	btrfs_calculate_block_csum_pages(fs_info, paddrs, csum);
3566 	if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
3567 		goto zeroit;
3568 	return true;
3569 
3570 zeroit:
3571 	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
3572 				    bbio->mirror_num);
3573 	if (dev)
3574 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
3575 	for (int i = 0; i < nr_steps; i++)
3576 		memzero_page(phys_to_page(paddrs[i]), offset_in_page(paddrs[i]), step);
3577 	return false;
3578 }
3579 
3580 /*
3581  * Perform a delayed iput on @inode.
3582  *
3583  * @inode: The inode we want to perform iput on
3584  *
3585  * This function uses the generic vfs_inode::i_count to track whether we should
3586  * just decrement it (in case it's > 1) or if this is the last iput then link
3587  * the inode to the delayed iput machinery. Delayed iputs are processed at
3588  * transaction commit time/superblock commit/cleaner kthread.
3589  */
btrfs_add_delayed_iput(struct btrfs_inode * inode)3590 void btrfs_add_delayed_iput(struct btrfs_inode *inode)
3591 {
3592 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3593 	unsigned long flags;
3594 
3595 	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
3596 		return;
3597 
3598 	WARN_ON_ONCE(test_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state));
3599 	atomic_inc(&fs_info->nr_delayed_iputs);
3600 	/*
3601 	 * Need to be irq safe here because we can be called from either an irq
3602 	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
3603 	 * context.
3604 	 */
3605 	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
3606 	ASSERT(list_empty(&inode->delayed_iput));
3607 	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
3608 	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
3609 	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3610 		wake_up_process(fs_info->cleaner_kthread);
3611 }
3612 
run_delayed_iput_locked(struct btrfs_fs_info * fs_info,struct btrfs_inode * inode)3613 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3614 				    struct btrfs_inode *inode)
3615 {
3616 	list_del_init(&inode->delayed_iput);
3617 	spin_unlock_irq(&fs_info->delayed_iput_lock);
3618 	iput(&inode->vfs_inode);
3619 	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3620 		wake_up(&fs_info->delayed_iputs_wait);
3621 	spin_lock_irq(&fs_info->delayed_iput_lock);
3622 }
3623 
btrfs_run_delayed_iput(struct btrfs_fs_info * fs_info,struct btrfs_inode * inode)3624 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3625 				   struct btrfs_inode *inode)
3626 {
3627 	if (!list_empty(&inode->delayed_iput)) {
3628 		spin_lock_irq(&fs_info->delayed_iput_lock);
3629 		if (!list_empty(&inode->delayed_iput))
3630 			run_delayed_iput_locked(fs_info, inode);
3631 		spin_unlock_irq(&fs_info->delayed_iput_lock);
3632 	}
3633 }
3634 
btrfs_run_delayed_iputs(struct btrfs_fs_info * fs_info)3635 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3636 {
3637 	/*
3638 	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
3639 	 * calls btrfs_add_delayed_iput() and that needs to lock
3640 	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
3641 	 * prevent a deadlock.
3642 	 */
3643 	spin_lock_irq(&fs_info->delayed_iput_lock);
3644 	while (!list_empty(&fs_info->delayed_iputs)) {
3645 		struct btrfs_inode *inode;
3646 
3647 		inode = list_first_entry(&fs_info->delayed_iputs,
3648 				struct btrfs_inode, delayed_iput);
3649 		run_delayed_iput_locked(fs_info, inode);
3650 		if (need_resched()) {
3651 			spin_unlock_irq(&fs_info->delayed_iput_lock);
3652 			cond_resched();
3653 			spin_lock_irq(&fs_info->delayed_iput_lock);
3654 		}
3655 	}
3656 	spin_unlock_irq(&fs_info->delayed_iput_lock);
3657 }
3658 
3659 /*
3660  * Wait for flushing all delayed iputs
3661  *
3662  * @fs_info:  the filesystem
3663  *
3664  * This will wait on any delayed iputs that are currently running with KILLABLE
3665  * set.  Once they are all done running we will return, unless we are killed in
3666  * which case we return EINTR. This helps in user operations like fallocate etc
3667  * that might get blocked on the iputs.
3668  *
3669  * Return EINTR if we were killed, 0 if nothing's pending
3670  */
btrfs_wait_on_delayed_iputs(struct btrfs_fs_info * fs_info)3671 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3672 {
3673 	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3674 			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3675 	if (ret)
3676 		return -EINTR;
3677 	return 0;
3678 }
3679 
3680 /*
3681  * This creates an orphan entry for the given inode in case something goes wrong
3682  * in the middle of an unlink.
3683  */
btrfs_orphan_add(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)3684 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3685 		     struct btrfs_inode *inode)
3686 {
3687 	int ret;
3688 
3689 	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3690 	if (unlikely(ret && ret != -EEXIST)) {
3691 		btrfs_abort_transaction(trans, ret);
3692 		return ret;
3693 	}
3694 
3695 	return 0;
3696 }
3697 
3698 /*
3699  * We have done the delete so we can go ahead and remove the orphan item for
3700  * this particular inode.
3701  */
btrfs_orphan_del(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)3702 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3703 			    struct btrfs_inode *inode)
3704 {
3705 	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3706 }
3707 
3708 /*
3709  * this cleans up any orphans that may be left on the list from the last use
3710  * of this root.
3711  */
btrfs_orphan_cleanup(struct btrfs_root * root)3712 int btrfs_orphan_cleanup(struct btrfs_root *root)
3713 {
3714 	struct btrfs_fs_info *fs_info = root->fs_info;
3715 	BTRFS_PATH_AUTO_FREE(path);
3716 	struct extent_buffer *leaf;
3717 	struct btrfs_key key, found_key;
3718 	struct btrfs_trans_handle *trans;
3719 	u64 last_objectid = 0;
3720 	int ret = 0, nr_unlink = 0;
3721 
3722 	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3723 		return 0;
3724 
3725 	path = btrfs_alloc_path();
3726 	if (!path) {
3727 		ret = -ENOMEM;
3728 		goto out;
3729 	}
3730 	path->reada = READA_BACK;
3731 
3732 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3733 	key.type = BTRFS_ORPHAN_ITEM_KEY;
3734 	key.offset = (u64)-1;
3735 
3736 	while (1) {
3737 		struct btrfs_inode *inode;
3738 
3739 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3740 		if (ret < 0)
3741 			goto out;
3742 
3743 		/*
3744 		 * if ret == 0 means we found what we were searching for, which
3745 		 * is weird, but possible, so only screw with path if we didn't
3746 		 * find the key and see if we have stuff that matches
3747 		 */
3748 		if (ret > 0) {
3749 			ret = 0;
3750 			if (path->slots[0] == 0)
3751 				break;
3752 			path->slots[0]--;
3753 		}
3754 
3755 		/* pull out the item */
3756 		leaf = path->nodes[0];
3757 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3758 
3759 		/* make sure the item matches what we want */
3760 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3761 			break;
3762 		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3763 			break;
3764 
3765 		/* release the path since we're done with it */
3766 		btrfs_release_path(path);
3767 
3768 		/*
3769 		 * this is where we are basically btrfs_lookup, without the
3770 		 * crossing root thing.  we store the inode number in the
3771 		 * offset of the orphan item.
3772 		 */
3773 
3774 		if (found_key.offset == last_objectid) {
3775 			/*
3776 			 * We found the same inode as before. This means we were
3777 			 * not able to remove its items via eviction triggered
3778 			 * by an iput(). A transaction abort may have happened,
3779 			 * due to -ENOSPC for example, so try to grab the error
3780 			 * that lead to a transaction abort, if any.
3781 			 */
3782 			btrfs_err(fs_info,
3783 				  "Error removing orphan entry, stopping orphan cleanup");
3784 			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
3785 			goto out;
3786 		}
3787 
3788 		last_objectid = found_key.offset;
3789 
3790 		found_key.objectid = found_key.offset;
3791 		found_key.type = BTRFS_INODE_ITEM_KEY;
3792 		found_key.offset = 0;
3793 		inode = btrfs_iget(last_objectid, root);
3794 		if (IS_ERR(inode)) {
3795 			ret = PTR_ERR(inode);
3796 			inode = NULL;
3797 			if (ret != -ENOENT)
3798 				goto out;
3799 		}
3800 
3801 		if (!inode && root == fs_info->tree_root) {
3802 			struct btrfs_root *dead_root;
3803 			int is_dead_root = 0;
3804 
3805 			/*
3806 			 * This is an orphan in the tree root. Currently these
3807 			 * could come from 2 sources:
3808 			 *  a) a root (snapshot/subvolume) deletion in progress
3809 			 *  b) a free space cache inode
3810 			 * We need to distinguish those two, as the orphan item
3811 			 * for a root must not get deleted before the deletion
3812 			 * of the snapshot/subvolume's tree completes.
3813 			 *
3814 			 * btrfs_find_orphan_roots() ran before us, which has
3815 			 * found all deleted roots and loaded them into
3816 			 * fs_info->fs_roots_radix. So here we can find if an
3817 			 * orphan item corresponds to a deleted root by looking
3818 			 * up the root from that radix tree.
3819 			 */
3820 
3821 			spin_lock(&fs_info->fs_roots_radix_lock);
3822 			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3823 							 (unsigned long)found_key.objectid);
3824 			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3825 				is_dead_root = 1;
3826 			spin_unlock(&fs_info->fs_roots_radix_lock);
3827 
3828 			if (is_dead_root) {
3829 				/* prevent this orphan from being found again */
3830 				key.offset = found_key.objectid - 1;
3831 				continue;
3832 			}
3833 
3834 		}
3835 
3836 		/*
3837 		 * If we have an inode with links, there are a couple of
3838 		 * possibilities:
3839 		 *
3840 		 * 1. We were halfway through creating fsverity metadata for the
3841 		 * file. In that case, the orphan item represents incomplete
3842 		 * fsverity metadata which must be cleaned up with
3843 		 * btrfs_drop_verity_items and deleting the orphan item.
3844 
3845 		 * 2. Old kernels (before v3.12) used to create an
3846 		 * orphan item for truncate indicating that there were possibly
3847 		 * extent items past i_size that needed to be deleted. In v3.12,
3848 		 * truncate was changed to update i_size in sync with the extent
3849 		 * items, but the (useless) orphan item was still created. Since
3850 		 * v4.18, we don't create the orphan item for truncate at all.
3851 		 *
3852 		 * So, this item could mean that we need to do a truncate, but
3853 		 * only if this filesystem was last used on a pre-v3.12 kernel
3854 		 * and was not cleanly unmounted. The odds of that are quite
3855 		 * slim, and it's a pain to do the truncate now, so just delete
3856 		 * the orphan item.
3857 		 *
3858 		 * It's also possible that this orphan item was supposed to be
3859 		 * deleted but wasn't. The inode number may have been reused,
3860 		 * but either way, we can delete the orphan item.
3861 		 */
3862 		if (!inode || inode->vfs_inode.i_nlink) {
3863 			if (inode) {
3864 				ret = btrfs_drop_verity_items(inode);
3865 				iput(&inode->vfs_inode);
3866 				inode = NULL;
3867 				if (ret)
3868 					goto out;
3869 			}
3870 			trans = btrfs_start_transaction(root, 1);
3871 			if (IS_ERR(trans)) {
3872 				ret = PTR_ERR(trans);
3873 				goto out;
3874 			}
3875 			btrfs_debug(fs_info, "auto deleting %Lu",
3876 				    found_key.objectid);
3877 			ret = btrfs_del_orphan_item(trans, root,
3878 						    found_key.objectid);
3879 			btrfs_end_transaction(trans);
3880 			if (ret)
3881 				goto out;
3882 			continue;
3883 		}
3884 
3885 		nr_unlink++;
3886 
3887 		/* this will do delete_inode and everything for us */
3888 		iput(&inode->vfs_inode);
3889 	}
3890 	/* release the path since we're done with it */
3891 	btrfs_release_path(path);
3892 
3893 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3894 		trans = btrfs_join_transaction(root);
3895 		if (!IS_ERR(trans))
3896 			btrfs_end_transaction(trans);
3897 	}
3898 
3899 	if (nr_unlink)
3900 		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3901 
3902 out:
3903 	if (ret)
3904 		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3905 	return ret;
3906 }
3907 
3908 /*
3909  * Look ahead in the leaf for xattrs. If we don't find any then we know there
3910  * can't be any ACLs.
3911  *
3912  * @leaf:       the eb leaf where to search
3913  * @slot:       the slot the inode is in
3914  * @objectid:   the objectid of the inode
3915  *
3916  * Return true if there is xattr/ACL, false otherwise.
3917  */
acls_after_inode_item(struct extent_buffer * leaf,int slot,u64 objectid,int * first_xattr_slot)3918 static noinline bool acls_after_inode_item(struct extent_buffer *leaf,
3919 					   int slot, u64 objectid,
3920 					   int *first_xattr_slot)
3921 {
3922 	u32 nritems = btrfs_header_nritems(leaf);
3923 	struct btrfs_key found_key;
3924 	static u64 xattr_access = 0;
3925 	static u64 xattr_default = 0;
3926 	int scanned = 0;
3927 
3928 	if (!xattr_access) {
3929 		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3930 					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3931 		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3932 					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3933 	}
3934 
3935 	slot++;
3936 	*first_xattr_slot = -1;
3937 	while (slot < nritems) {
3938 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3939 
3940 		/* We found a different objectid, there must be no ACLs. */
3941 		if (found_key.objectid != objectid)
3942 			return false;
3943 
3944 		/* We found an xattr, assume we've got an ACL. */
3945 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3946 			if (*first_xattr_slot == -1)
3947 				*first_xattr_slot = slot;
3948 			if (found_key.offset == xattr_access ||
3949 			    found_key.offset == xattr_default)
3950 				return true;
3951 		}
3952 
3953 		/*
3954 		 * We found a key greater than an xattr key, there can't be any
3955 		 * ACLs later on.
3956 		 */
3957 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3958 			return false;
3959 
3960 		slot++;
3961 		scanned++;
3962 
3963 		/*
3964 		 * The item order goes like:
3965 		 * - inode
3966 		 * - inode backrefs
3967 		 * - xattrs
3968 		 * - extents,
3969 		 *
3970 		 * so if there are lots of hard links to an inode there can be
3971 		 * a lot of backrefs.  Don't waste time searching too hard,
3972 		 * this is just an optimization.
3973 		 */
3974 		if (scanned >= 8)
3975 			break;
3976 	}
3977 	/*
3978 	 * We hit the end of the leaf before we found an xattr or something
3979 	 * larger than an xattr.  We have to assume the inode has ACLs.
3980 	 */
3981 	if (*first_xattr_slot == -1)
3982 		*first_xattr_slot = slot;
3983 	return true;
3984 }
3985 
btrfs_init_file_extent_tree(struct btrfs_inode * inode)3986 static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
3987 {
3988 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3989 
3990 	if (WARN_ON_ONCE(inode->file_extent_tree))
3991 		return 0;
3992 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
3993 		return 0;
3994 	if (!S_ISREG(inode->vfs_inode.i_mode))
3995 		return 0;
3996 	if (btrfs_is_free_space_inode(inode))
3997 		return 0;
3998 
3999 	inode->file_extent_tree = kmalloc_obj(struct extent_io_tree);
4000 	if (!inode->file_extent_tree)
4001 		return -ENOMEM;
4002 
4003 	btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree,
4004 				  IO_TREE_INODE_FILE_EXTENT);
4005 	/* Lockdep class is set only for the file extent tree. */
4006 	lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
4007 
4008 	return 0;
4009 }
4010 
btrfs_add_inode_to_root(struct btrfs_inode * inode,bool prealloc)4011 static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
4012 {
4013 	struct btrfs_root *root = inode->root;
4014 	struct btrfs_inode *existing;
4015 	const u64 ino = btrfs_ino(inode);
4016 	int ret;
4017 
4018 	if (inode_unhashed(&inode->vfs_inode))
4019 		return 0;
4020 
4021 	if (prealloc) {
4022 		ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
4023 		if (ret)
4024 			return ret;
4025 	}
4026 
4027 	existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
4028 
4029 	if (xa_is_err(existing)) {
4030 		ret = xa_err(existing);
4031 		ASSERT(ret != -EINVAL);
4032 		ASSERT(ret != -ENOMEM);
4033 		return ret;
4034 	} else if (existing) {
4035 		WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING)));
4036 	}
4037 
4038 	return 0;
4039 }
4040 
4041 /*
4042  * Read a locked inode from the btree into the in-memory inode and add it to
4043  * its root list/tree.
4044  *
4045  * On failure clean up the inode.
4046  */
btrfs_read_locked_inode(struct btrfs_inode * inode,struct btrfs_path * path)4047 static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path *path)
4048 {
4049 	struct btrfs_root *root = inode->root;
4050 	struct btrfs_fs_info *fs_info = root->fs_info;
4051 	struct extent_buffer *leaf;
4052 	struct btrfs_inode_item *inode_item;
4053 	struct inode *vfs_inode = &inode->vfs_inode;
4054 	struct btrfs_key location;
4055 	unsigned long ptr;
4056 	int maybe_acls;
4057 	u32 rdev;
4058 	int ret;
4059 	bool filled = false;
4060 	int first_xattr_slot;
4061 
4062 	ret = btrfs_fill_inode(inode, &rdev);
4063 	if (!ret)
4064 		filled = true;
4065 
4066 	ASSERT(path);
4067 
4068 	btrfs_get_inode_key(inode, &location);
4069 
4070 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
4071 	if (ret) {
4072 		/*
4073 		 * ret > 0 can come from btrfs_search_slot called by
4074 		 * btrfs_lookup_inode(), this means the inode was not found.
4075 		 */
4076 		if (ret > 0)
4077 			ret = -ENOENT;
4078 		goto out;
4079 	}
4080 
4081 	leaf = path->nodes[0];
4082 
4083 	if (filled)
4084 		goto cache_index;
4085 
4086 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4087 				    struct btrfs_inode_item);
4088 	vfs_inode->i_mode = btrfs_inode_mode(leaf, inode_item);
4089 	set_nlink(vfs_inode, btrfs_inode_nlink(leaf, inode_item));
4090 	i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item));
4091 	i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item));
4092 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
4093 
4094 	inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime),
4095 			btrfs_timespec_nsec(leaf, &inode_item->atime));
4096 
4097 	inode_set_mtime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
4098 			btrfs_timespec_nsec(leaf, &inode_item->mtime));
4099 
4100 	inode_set_ctime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
4101 			btrfs_timespec_nsec(leaf, &inode_item->ctime));
4102 
4103 	inode->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
4104 	inode->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
4105 
4106 	inode_set_bytes(vfs_inode, btrfs_inode_nbytes(leaf, inode_item));
4107 	inode->generation = btrfs_inode_generation(leaf, inode_item);
4108 	inode->last_trans = btrfs_inode_transid(leaf, inode_item);
4109 
4110 	inode_set_iversion_queried(vfs_inode, btrfs_inode_sequence(leaf, inode_item));
4111 	vfs_inode->i_generation = inode->generation;
4112 	vfs_inode->i_rdev = 0;
4113 	rdev = btrfs_inode_rdev(leaf, inode_item);
4114 
4115 	if (S_ISDIR(vfs_inode->i_mode))
4116 		inode->index_cnt = (u64)-1;
4117 
4118 	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
4119 				&inode->flags, &inode->ro_flags);
4120 	btrfs_update_inode_mapping_flags(inode);
4121 	btrfs_set_inode_mapping_order(inode);
4122 
4123 cache_index:
4124 	/*
4125 	 * If we were modified in the current generation and evicted from memory
4126 	 * and then re-read we need to do a full sync since we don't have any
4127 	 * idea about which extents were modified before we were evicted from
4128 	 * cache.
4129 	 *
4130 	 * This is required for both inode re-read from disk and delayed inode
4131 	 * in the delayed_nodes xarray.
4132 	 */
4133 	if (inode->last_trans == btrfs_get_fs_generation(fs_info))
4134 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
4135 
4136 	/*
4137 	 * We don't persist the id of the transaction where an unlink operation
4138 	 * against the inode was last made. So here we assume the inode might
4139 	 * have been evicted, and therefore the exact value of last_unlink_trans
4140 	 * lost, and set it to last_trans to avoid metadata inconsistencies
4141 	 * between the inode and its parent if the inode is fsync'ed and the log
4142 	 * replayed. For example, in the scenario:
4143 	 *
4144 	 * touch mydir/foo
4145 	 * ln mydir/foo mydir/bar
4146 	 * sync
4147 	 * unlink mydir/bar
4148 	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
4149 	 * xfs_io -c fsync mydir/foo
4150 	 * <power failure>
4151 	 * mount fs, triggers fsync log replay
4152 	 *
4153 	 * We must make sure that when we fsync our inode foo we also log its
4154 	 * parent inode, otherwise after log replay the parent still has the
4155 	 * dentry with the "bar" name but our inode foo has a link count of 1
4156 	 * and doesn't have an inode ref with the name "bar" anymore.
4157 	 *
4158 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
4159 	 * but it guarantees correctness at the expense of occasional full
4160 	 * transaction commits on fsync if our inode is a directory, or if our
4161 	 * inode is not a directory, logging its parent unnecessarily.
4162 	 */
4163 	inode->last_unlink_trans = inode->last_trans;
4164 
4165 	/*
4166 	 * Same logic as for last_unlink_trans. We don't persist the generation
4167 	 * of the last transaction where this inode was used for a reflink
4168 	 * operation, so after eviction and reloading the inode we must be
4169 	 * pessimistic and assume the last transaction that modified the inode.
4170 	 */
4171 	inode->last_reflink_trans = inode->last_trans;
4172 
4173 	path->slots[0]++;
4174 	if (vfs_inode->i_nlink != 1 ||
4175 	    path->slots[0] >= btrfs_header_nritems(leaf))
4176 		goto cache_acl;
4177 
4178 	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
4179 	if (location.objectid != btrfs_ino(inode))
4180 		goto cache_acl;
4181 
4182 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4183 	if (location.type == BTRFS_INODE_REF_KEY) {
4184 		struct btrfs_inode_ref *ref;
4185 
4186 		ref = (struct btrfs_inode_ref *)ptr;
4187 		inode->dir_index = btrfs_inode_ref_index(leaf, ref);
4188 	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
4189 		struct btrfs_inode_extref *extref;
4190 
4191 		extref = (struct btrfs_inode_extref *)ptr;
4192 		inode->dir_index = btrfs_inode_extref_index(leaf, extref);
4193 	}
4194 cache_acl:
4195 	/*
4196 	 * try to precache a NULL acl entry for files that don't have
4197 	 * any xattrs or acls
4198 	 */
4199 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
4200 					   btrfs_ino(inode), &first_xattr_slot);
4201 	if (first_xattr_slot != -1) {
4202 		path->slots[0] = first_xattr_slot;
4203 		ret = btrfs_load_inode_props(inode, path);
4204 		if (ret)
4205 			btrfs_err(fs_info,
4206 				  "error loading props for ino %llu (root %llu): %d",
4207 				  btrfs_ino(inode), btrfs_root_id(root), ret);
4208 	}
4209 
4210 	/*
4211 	 * We don't need the path anymore, so release it to avoid holding a read
4212 	 * lock on a leaf while calling btrfs_init_file_extent_tree(), which can
4213 	 * allocate memory that triggers reclaim (GFP_KERNEL) and cause a locking
4214 	 * dependency.
4215 	 */
4216 	btrfs_release_path(path);
4217 
4218 	ret = btrfs_init_file_extent_tree(inode);
4219 	if (ret)
4220 		goto out;
4221 	btrfs_inode_set_file_extent_range(inode, 0,
4222 			  round_up(i_size_read(vfs_inode), fs_info->sectorsize));
4223 
4224 	if (!maybe_acls)
4225 		cache_no_acl(vfs_inode);
4226 
4227 	switch (vfs_inode->i_mode & S_IFMT) {
4228 	case S_IFREG:
4229 		vfs_inode->i_mapping->a_ops = &btrfs_aops;
4230 		vfs_inode->i_fop = &btrfs_file_operations;
4231 		vfs_inode->i_op = &btrfs_file_inode_operations;
4232 		break;
4233 	case S_IFDIR:
4234 		vfs_inode->i_fop = &btrfs_dir_file_operations;
4235 		vfs_inode->i_op = &btrfs_dir_inode_operations;
4236 		break;
4237 	case S_IFLNK:
4238 		vfs_inode->i_op = &btrfs_symlink_inode_operations;
4239 		inode_nohighmem(vfs_inode);
4240 		vfs_inode->i_mapping->a_ops = &btrfs_aops;
4241 		break;
4242 	default:
4243 		vfs_inode->i_op = &btrfs_special_inode_operations;
4244 		init_special_inode(vfs_inode, vfs_inode->i_mode, rdev);
4245 		break;
4246 	}
4247 
4248 	btrfs_sync_inode_flags_to_i_flags(inode);
4249 
4250 	ret = btrfs_add_inode_to_root(inode, true);
4251 	if (ret)
4252 		goto out;
4253 
4254 	return 0;
4255 out:
4256 	/*
4257 	 * We may have a read locked leaf and iget_failed() triggers inode
4258 	 * eviction which needs to release the delayed inode and that needs
4259 	 * to lock the delayed inode's mutex. This can cause a ABBA deadlock
4260 	 * with a task running delayed items, as that require first locking
4261 	 * the delayed inode's mutex and then modifying its subvolume btree.
4262 	 * So release the path before iget_failed().
4263 	 */
4264 	btrfs_release_path(path);
4265 	iget_failed(vfs_inode);
4266 	return ret;
4267 }
4268 
4269 /*
4270  * given a leaf and an inode, copy the inode fields into the leaf
4271  */
fill_inode_item(struct btrfs_trans_handle * trans,struct extent_buffer * leaf,struct btrfs_inode_item * item,struct inode * inode)4272 static void fill_inode_item(struct btrfs_trans_handle *trans,
4273 			    struct extent_buffer *leaf,
4274 			    struct btrfs_inode_item *item,
4275 			    struct inode *inode)
4276 {
4277 	u64 flags;
4278 
4279 	btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
4280 	btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
4281 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
4282 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
4283 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
4284 
4285 	btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
4286 	btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));
4287 
4288 	btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
4289 	btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));
4290 
4291 	btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
4292 	btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));
4293 
4294 	btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
4295 	btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
4296 
4297 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
4298 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
4299 	btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
4300 	btrfs_set_inode_transid(leaf, item, trans->transid);
4301 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
4302 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4303 					  BTRFS_I(inode)->ro_flags);
4304 	btrfs_set_inode_flags(leaf, item, flags);
4305 	btrfs_set_inode_block_group(leaf, item, 0);
4306 }
4307 
4308 /*
4309  * copy everything in the in-memory inode into the btree.
4310  */
btrfs_update_inode_item(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)4311 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4312 					    struct btrfs_inode *inode)
4313 {
4314 	struct btrfs_inode_item *inode_item;
4315 	BTRFS_PATH_AUTO_FREE(path);
4316 	struct extent_buffer *leaf;
4317 	struct btrfs_key key;
4318 	int ret;
4319 
4320 	path = btrfs_alloc_path();
4321 	if (!path)
4322 		return -ENOMEM;
4323 
4324 	btrfs_get_inode_key(inode, &key);
4325 	ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
4326 	if (ret) {
4327 		if (ret > 0)
4328 			ret = -ENOENT;
4329 		return ret;
4330 	}
4331 
4332 	leaf = path->nodes[0];
4333 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4334 				    struct btrfs_inode_item);
4335 
4336 	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
4337 	btrfs_set_inode_last_trans(trans, inode);
4338 	return 0;
4339 }
4340 
4341 /*
4342  * copy everything in the in-memory inode into the btree.
4343  */
btrfs_update_inode(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)4344 int btrfs_update_inode(struct btrfs_trans_handle *trans,
4345 		       struct btrfs_inode *inode)
4346 {
4347 	struct btrfs_root *root = inode->root;
4348 	struct btrfs_fs_info *fs_info = root->fs_info;
4349 	int ret;
4350 
4351 	/*
4352 	 * If the inode is a free space inode, we can deadlock during commit
4353 	 * if we put it into the delayed code.
4354 	 *
4355 	 * The data relocation inode should also be directly updated
4356 	 * without delay
4357 	 */
4358 	if (!btrfs_is_free_space_inode(inode)
4359 	    && !btrfs_is_data_reloc_root(root)
4360 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4361 		btrfs_update_root_times(trans, root);
4362 
4363 		ret = btrfs_delayed_update_inode(trans, inode);
4364 		if (!ret)
4365 			btrfs_set_inode_last_trans(trans, inode);
4366 		return ret;
4367 	}
4368 
4369 	return btrfs_update_inode_item(trans, inode);
4370 }
4371 
btrfs_update_inode_fallback(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)4372 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4373 				struct btrfs_inode *inode)
4374 {
4375 	int ret;
4376 
4377 	ret = btrfs_update_inode(trans, inode);
4378 	if (ret == -ENOSPC)
4379 		return btrfs_update_inode_item(trans, inode);
4380 	return ret;
4381 }
4382 
update_time_after_link_or_unlink(struct btrfs_inode * dir)4383 static void update_time_after_link_or_unlink(struct btrfs_inode *dir)
4384 {
4385 	struct timespec64 now;
4386 
4387 	/*
4388 	 * If we are replaying a log tree, we do not want to update the mtime
4389 	 * and ctime of the parent directory with the current time, since the
4390 	 * log replay procedure is responsible for setting them to their correct
4391 	 * values (the ones it had when the fsync was done).
4392 	 */
4393 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags))
4394 		return;
4395 
4396 	now = inode_set_ctime_current(&dir->vfs_inode);
4397 	inode_set_mtime_to_ts(&dir->vfs_inode, now);
4398 }
4399 
4400 /*
4401  * unlink helper that gets used here in inode.c and in the tree logging
4402  * recovery code.  It remove a link in a directory with a given name, and
4403  * also drops the back refs in the inode to the directory
4404  */
__btrfs_unlink_inode(struct btrfs_trans_handle * trans,struct btrfs_inode * dir,struct btrfs_inode * inode,const struct fscrypt_str * name,struct btrfs_rename_ctx * rename_ctx)4405 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4406 				struct btrfs_inode *dir,
4407 				struct btrfs_inode *inode,
4408 				const struct fscrypt_str *name,
4409 				struct btrfs_rename_ctx *rename_ctx)
4410 {
4411 	struct btrfs_root *root = dir->root;
4412 	struct btrfs_fs_info *fs_info = root->fs_info;
4413 	struct btrfs_path *path;
4414 	int ret = 0;
4415 	struct btrfs_dir_item *di;
4416 	u64 index;
4417 	u64 ino = btrfs_ino(inode);
4418 	u64 dir_ino = btrfs_ino(dir);
4419 
4420 	path = btrfs_alloc_path();
4421 	if (!path)
4422 		return -ENOMEM;
4423 
4424 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
4425 	if (IS_ERR_OR_NULL(di)) {
4426 		btrfs_free_path(path);
4427 		return di ? PTR_ERR(di) : -ENOENT;
4428 	}
4429 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4430 	/*
4431 	 * Down the call chains below we'll also need to allocate a path, so no
4432 	 * need to hold on to this one for longer than necessary.
4433 	 */
4434 	btrfs_free_path(path);
4435 	if (ret)
4436 		return ret;
4437 
4438 	/*
4439 	 * If we don't have dir index, we have to get it by looking up
4440 	 * the inode ref, since we get the inode ref, remove it directly,
4441 	 * it is unnecessary to do delayed deletion.
4442 	 *
4443 	 * But if we have dir index, needn't search inode ref to get it.
4444 	 * Since the inode ref is close to the inode item, it is better
4445 	 * that we delay to delete it, and just do this deletion when
4446 	 * we update the inode item.
4447 	 */
4448 	if (inode->dir_index) {
4449 		ret = btrfs_delayed_delete_inode_ref(inode);
4450 		if (!ret) {
4451 			index = inode->dir_index;
4452 			goto skip_backref;
4453 		}
4454 	}
4455 
4456 	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
4457 	if (unlikely(ret)) {
4458 		btrfs_crit(fs_info,
4459 	   "failed to delete reference to %.*s, root %llu inode %llu parent %llu",
4460 			   name->len, name->name, btrfs_root_id(root), ino, dir_ino);
4461 		btrfs_abort_transaction(trans, ret);
4462 		return ret;
4463 	}
4464 skip_backref:
4465 	if (rename_ctx)
4466 		rename_ctx->index = index;
4467 
4468 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4469 	if (unlikely(ret)) {
4470 		btrfs_abort_transaction(trans, ret);
4471 		return ret;
4472 	}
4473 
4474 	/*
4475 	 * If we are in a rename context, we don't need to update anything in the
4476 	 * log. That will be done later during the rename by btrfs_log_new_name().
4477 	 * Besides that, doing it here would only cause extra unnecessary btree
4478 	 * operations on the log tree, increasing latency for applications.
4479 	 */
4480 	if (!rename_ctx) {
4481 		btrfs_del_inode_ref_in_log(trans, name, inode, dir);
4482 		btrfs_del_dir_entries_in_log(trans, name, dir, index);
4483 	}
4484 
4485 	/*
4486 	 * If we have a pending delayed iput we could end up with the final iput
4487 	 * being run in btrfs-cleaner context.  If we have enough of these built
4488 	 * up we can end up burning a lot of time in btrfs-cleaner without any
4489 	 * way to throttle the unlinks.  Since we're currently holding a ref on
4490 	 * the inode we can run the delayed iput here without any issues as the
4491 	 * final iput won't be done until after we drop the ref we're currently
4492 	 * holding.
4493 	 */
4494 	btrfs_run_delayed_iput(fs_info, inode);
4495 
4496 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
4497 	inode_inc_iversion(&inode->vfs_inode);
4498 	inode_set_ctime_current(&inode->vfs_inode);
4499 	inode_inc_iversion(&dir->vfs_inode);
4500 	update_time_after_link_or_unlink(dir);
4501 
4502 	return btrfs_update_inode(trans, dir);
4503 }
4504 
btrfs_unlink_inode(struct btrfs_trans_handle * trans,struct btrfs_inode * dir,struct btrfs_inode * inode,const struct fscrypt_str * name)4505 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4506 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
4507 		       const struct fscrypt_str *name)
4508 {
4509 	int ret;
4510 
4511 	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
4512 	if (!ret) {
4513 		drop_nlink(&inode->vfs_inode);
4514 		ret = btrfs_update_inode(trans, inode);
4515 	}
4516 	return ret;
4517 }
4518 
4519 /*
4520  * helper to start transaction for unlink and rmdir.
4521  *
4522  * unlink and rmdir are special in btrfs, they do not always free space, so
4523  * if we cannot make our reservations the normal way try and see if there is
4524  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4525  * allow the unlink to occur.
4526  */
__unlink_start_trans(struct btrfs_inode * dir)4527 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
4528 {
4529 	struct btrfs_root *root = dir->root;
4530 
4531 	return btrfs_start_transaction_fallback_global_rsv(root,
4532 						   BTRFS_UNLINK_METADATA_UNITS);
4533 }
4534 
btrfs_unlink(struct inode * dir,struct dentry * dentry)4535 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4536 {
4537 	struct btrfs_trans_handle *trans;
4538 	struct inode *inode = d_inode(dentry);
4539 	int ret;
4540 	struct fscrypt_name fname;
4541 
4542 	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4543 	if (ret)
4544 		return ret;
4545 
4546 	/* This needs to handle no-key deletions later on */
4547 
4548 	trans = __unlink_start_trans(BTRFS_I(dir));
4549 	if (IS_ERR(trans)) {
4550 		ret = PTR_ERR(trans);
4551 		goto fscrypt_free;
4552 	}
4553 
4554 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4555 				false);
4556 
4557 	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4558 				 &fname.disk_name);
4559 	if (ret)
4560 		goto end_trans;
4561 
4562 	if (inode->i_nlink == 0) {
4563 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4564 		if (ret)
4565 			goto end_trans;
4566 	}
4567 
4568 end_trans:
4569 	btrfs_end_transaction(trans);
4570 	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4571 fscrypt_free:
4572 	fscrypt_free_filename(&fname);
4573 	return ret;
4574 }
4575 
btrfs_unlink_subvol(struct btrfs_trans_handle * trans,struct btrfs_inode * dir,struct dentry * dentry)4576 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4577 			       struct btrfs_inode *dir, struct dentry *dentry)
4578 {
4579 	struct btrfs_root *root = dir->root;
4580 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4581 	BTRFS_PATH_AUTO_FREE(path);
4582 	struct extent_buffer *leaf;
4583 	struct btrfs_dir_item *di;
4584 	struct btrfs_key key;
4585 	u64 index;
4586 	int ret;
4587 	u64 objectid;
4588 	u64 dir_ino = btrfs_ino(dir);
4589 	struct fscrypt_name fname;
4590 
4591 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
4592 	if (ret)
4593 		return ret;
4594 
4595 	/* This needs to handle no-key deletions later on */
4596 
4597 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4598 		objectid = btrfs_root_id(inode->root);
4599 	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4600 		objectid = inode->ref_root_id;
4601 	} else {
4602 		WARN_ON(1);
4603 		fscrypt_free_filename(&fname);
4604 		return -EINVAL;
4605 	}
4606 
4607 	path = btrfs_alloc_path();
4608 	if (!path) {
4609 		ret = -ENOMEM;
4610 		goto out;
4611 	}
4612 
4613 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4614 				   &fname.disk_name, -1);
4615 	if (IS_ERR_OR_NULL(di)) {
4616 		ret = di ? PTR_ERR(di) : -ENOENT;
4617 		goto out;
4618 	}
4619 
4620 	leaf = path->nodes[0];
4621 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4622 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4623 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4624 	if (unlikely(ret)) {
4625 		btrfs_abort_transaction(trans, ret);
4626 		goto out;
4627 	}
4628 	btrfs_release_path(path);
4629 
4630 	/*
4631 	 * This is a placeholder inode for a subvolume we didn't have a
4632 	 * reference to at the time of the snapshot creation.  In the meantime
4633 	 * we could have renamed the real subvol link into our snapshot, so
4634 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4635 	 * Instead simply lookup the dir_index_item for this entry so we can
4636 	 * remove it.  Otherwise we know we have a ref to the root and we can
4637 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4638 	 */
4639 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4640 		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
4641 		if (IS_ERR(di)) {
4642 			ret = PTR_ERR(di);
4643 			btrfs_abort_transaction(trans, ret);
4644 			goto out;
4645 		}
4646 
4647 		leaf = path->nodes[0];
4648 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4649 		index = key.offset;
4650 		btrfs_release_path(path);
4651 	} else {
4652 		ret = btrfs_del_root_ref(trans, objectid,
4653 					 btrfs_root_id(root), dir_ino,
4654 					 &index, &fname.disk_name);
4655 		if (unlikely(ret)) {
4656 			btrfs_abort_transaction(trans, ret);
4657 			goto out;
4658 		}
4659 	}
4660 
4661 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4662 	if (unlikely(ret)) {
4663 		btrfs_abort_transaction(trans, ret);
4664 		goto out;
4665 	}
4666 
4667 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
4668 	inode_inc_iversion(&dir->vfs_inode);
4669 	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4670 	ret = btrfs_update_inode_fallback(trans, dir);
4671 	if (ret)
4672 		btrfs_abort_transaction(trans, ret);
4673 out:
4674 	fscrypt_free_filename(&fname);
4675 	return ret;
4676 }
4677 
4678 /*
4679  * Helper to check if the subvolume references other subvolumes or if it's
4680  * default.
4681  */
may_destroy_subvol(struct btrfs_root * root)4682 static noinline int may_destroy_subvol(struct btrfs_root *root)
4683 {
4684 	struct btrfs_fs_info *fs_info = root->fs_info;
4685 	BTRFS_PATH_AUTO_FREE(path);
4686 	struct btrfs_dir_item *di;
4687 	struct btrfs_key key;
4688 	struct fscrypt_str name = FSTR_INIT("default", 7);
4689 	u64 dir_id;
4690 	int ret;
4691 
4692 	path = btrfs_alloc_path();
4693 	if (!path)
4694 		return -ENOMEM;
4695 
4696 	/* Make sure this root isn't set as the default subvol */
4697 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4698 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4699 				   dir_id, &name, 0);
4700 	if (di && !IS_ERR(di)) {
4701 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4702 		if (key.objectid == btrfs_root_id(root)) {
4703 			ret = -EPERM;
4704 			btrfs_err(fs_info,
4705 				  "deleting default subvolume %llu is not allowed",
4706 				  key.objectid);
4707 			return ret;
4708 		}
4709 		btrfs_release_path(path);
4710 	}
4711 
4712 	key.objectid = btrfs_root_id(root);
4713 	key.type = BTRFS_ROOT_REF_KEY;
4714 	key.offset = (u64)-1;
4715 
4716 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4717 	if (ret < 0)
4718 		return ret;
4719 	if (unlikely(ret == 0)) {
4720 		/*
4721 		 * Key with offset -1 found, there would have to exist a root
4722 		 * with such id, but this is out of valid range.
4723 		 */
4724 		return -EUCLEAN;
4725 	}
4726 
4727 	ret = 0;
4728 	if (path->slots[0] > 0) {
4729 		path->slots[0]--;
4730 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4731 		if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
4732 			ret = -ENOTEMPTY;
4733 	}
4734 
4735 	return ret;
4736 }
4737 
4738 /* Delete all dentries for inodes belonging to the root */
btrfs_prune_dentries(struct btrfs_root * root)4739 static void btrfs_prune_dentries(struct btrfs_root *root)
4740 {
4741 	struct btrfs_fs_info *fs_info = root->fs_info;
4742 	struct btrfs_inode *inode;
4743 	u64 min_ino = 0;
4744 
4745 	if (!BTRFS_FS_ERROR(fs_info))
4746 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4747 
4748 	inode = btrfs_find_first_inode(root, min_ino);
4749 	while (inode) {
4750 		if (icount_read(&inode->vfs_inode) > 1)
4751 			d_prune_aliases(&inode->vfs_inode);
4752 
4753 		min_ino = btrfs_ino(inode) + 1;
4754 		/*
4755 		 * btrfs_drop_inode() will have it removed from the inode
4756 		 * cache when its usage count hits zero.
4757 		 */
4758 		iput(&inode->vfs_inode);
4759 		cond_resched();
4760 		inode = btrfs_find_first_inode(root, min_ino);
4761 	}
4762 }
4763 
btrfs_delete_subvolume(struct btrfs_inode * dir,struct dentry * dentry)4764 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
4765 {
4766 	struct btrfs_root *root = dir->root;
4767 	struct btrfs_fs_info *fs_info = root->fs_info;
4768 	struct inode *inode = d_inode(dentry);
4769 	struct btrfs_root *dest = BTRFS_I(inode)->root;
4770 	struct btrfs_trans_handle *trans;
4771 	struct btrfs_block_rsv block_rsv;
4772 	u64 root_flags;
4773 	u64 qgroup_reserved = 0;
4774 	int ret;
4775 
4776 	down_write(&fs_info->subvol_sem);
4777 
4778 	/*
4779 	 * Don't allow to delete a subvolume with send in progress. This is
4780 	 * inside the inode lock so the error handling that has to drop the bit
4781 	 * again is not run concurrently.
4782 	 */
4783 	spin_lock(&dest->root_item_lock);
4784 	if (dest->send_in_progress) {
4785 		spin_unlock(&dest->root_item_lock);
4786 		btrfs_warn(fs_info,
4787 			   "attempt to delete subvolume %llu during send",
4788 			   btrfs_root_id(dest));
4789 		ret = -EPERM;
4790 		goto out_up_write;
4791 	}
4792 	if (atomic_read(&dest->nr_swapfiles)) {
4793 		spin_unlock(&dest->root_item_lock);
4794 		btrfs_warn(fs_info,
4795 			   "attempt to delete subvolume %llu with active swapfile",
4796 			   btrfs_root_id(dest));
4797 		ret = -EPERM;
4798 		goto out_up_write;
4799 	}
4800 	root_flags = btrfs_root_flags(&dest->root_item);
4801 	btrfs_set_root_flags(&dest->root_item,
4802 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4803 	spin_unlock(&dest->root_item_lock);
4804 
4805 	ret = may_destroy_subvol(dest);
4806 	if (ret)
4807 		goto out_undead;
4808 
4809 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4810 	/*
4811 	 * One for dir inode,
4812 	 * two for dir entries,
4813 	 * two for root ref/backref.
4814 	 */
4815 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4816 	if (ret)
4817 		goto out_undead;
4818 	qgroup_reserved = block_rsv.qgroup_rsv_reserved;
4819 
4820 	trans = btrfs_start_transaction(root, 0);
4821 	if (IS_ERR(trans)) {
4822 		ret = PTR_ERR(trans);
4823 		goto out_release;
4824 	}
4825 	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
4826 	qgroup_reserved = 0;
4827 	trans->block_rsv = &block_rsv;
4828 	trans->bytes_reserved = block_rsv.size;
4829 
4830 	btrfs_record_snapshot_destroy(trans, dir);
4831 
4832 	ret = btrfs_unlink_subvol(trans, dir, dentry);
4833 	if (unlikely(ret)) {
4834 		btrfs_abort_transaction(trans, ret);
4835 		goto out_end_trans;
4836 	}
4837 
4838 	ret = btrfs_record_root_in_trans(trans, dest);
4839 	if (unlikely(ret)) {
4840 		btrfs_abort_transaction(trans, ret);
4841 		goto out_end_trans;
4842 	}
4843 
4844 	memset(&dest->root_item.drop_progress, 0,
4845 		sizeof(dest->root_item.drop_progress));
4846 	btrfs_set_root_drop_level(&dest->root_item, 0);
4847 	btrfs_set_root_refs(&dest->root_item, 0);
4848 
4849 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4850 		ret = btrfs_insert_orphan_item(trans,
4851 					fs_info->tree_root,
4852 					btrfs_root_id(dest));
4853 		if (unlikely(ret)) {
4854 			btrfs_abort_transaction(trans, ret);
4855 			goto out_end_trans;
4856 		}
4857 	}
4858 
4859 	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4860 				     BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
4861 	if (unlikely(ret && ret != -ENOENT)) {
4862 		btrfs_abort_transaction(trans, ret);
4863 		goto out_end_trans;
4864 	}
4865 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4866 		ret = btrfs_uuid_tree_remove(trans,
4867 					  dest->root_item.received_uuid,
4868 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4869 					  btrfs_root_id(dest));
4870 		if (unlikely(ret && ret != -ENOENT)) {
4871 			btrfs_abort_transaction(trans, ret);
4872 			goto out_end_trans;
4873 		}
4874 	}
4875 
4876 	free_anon_bdev(dest->anon_dev);
4877 	dest->anon_dev = 0;
4878 out_end_trans:
4879 	trans->block_rsv = NULL;
4880 	trans->bytes_reserved = 0;
4881 	ret = btrfs_end_transaction(trans);
4882 	inode->i_flags |= S_DEAD;
4883 out_release:
4884 	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
4885 	if (qgroup_reserved)
4886 		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
4887 out_undead:
4888 	if (ret) {
4889 		spin_lock(&dest->root_item_lock);
4890 		root_flags = btrfs_root_flags(&dest->root_item);
4891 		btrfs_set_root_flags(&dest->root_item,
4892 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4893 		spin_unlock(&dest->root_item_lock);
4894 	}
4895 out_up_write:
4896 	up_write(&fs_info->subvol_sem);
4897 	if (!ret) {
4898 		d_invalidate(dentry);
4899 		btrfs_prune_dentries(dest);
4900 		ASSERT(dest->send_in_progress == 0);
4901 	}
4902 
4903 	return ret;
4904 }
4905 
btrfs_rmdir(struct inode * vfs_dir,struct dentry * dentry)4906 static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry)
4907 {
4908 	struct btrfs_inode *dir = BTRFS_I(vfs_dir);
4909 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4910 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4911 	int ret = 0;
4912 	struct btrfs_trans_handle *trans;
4913 	struct fscrypt_name fname;
4914 
4915 	if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE)
4916 		return -ENOTEMPTY;
4917 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4918 		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4919 			btrfs_err(fs_info,
4920 			"extent tree v2 doesn't support snapshot deletion yet");
4921 			return -EOPNOTSUPP;
4922 		}
4923 		return btrfs_delete_subvolume(dir, dentry);
4924 	}
4925 
4926 	ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, &fname);
4927 	if (ret)
4928 		return ret;
4929 
4930 	/* This needs to handle no-key deletions later on */
4931 
4932 	trans = __unlink_start_trans(dir);
4933 	if (IS_ERR(trans)) {
4934 		ret = PTR_ERR(trans);
4935 		goto out_notrans;
4936 	}
4937 
4938 	/*
4939 	 * Propagate the last_unlink_trans value of the deleted dir to its
4940 	 * parent directory. This is to prevent an unrecoverable log tree in the
4941 	 * case we do something like this:
4942 	 * 1) create dir foo
4943 	 * 2) create snapshot under dir foo
4944 	 * 3) delete the snapshot
4945 	 * 4) rmdir foo
4946 	 * 5) mkdir foo
4947 	 * 6) fsync foo or some file inside foo
4948 	 *
4949 	 * This is because we can't unlink other roots when replaying the dir
4950 	 * deletes for directory foo.
4951 	 */
4952 	if (inode->last_unlink_trans >= trans->transid)
4953 		btrfs_record_snapshot_destroy(trans, dir);
4954 
4955 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4956 		ret = btrfs_unlink_subvol(trans, dir, dentry);
4957 		goto out;
4958 	}
4959 
4960 	ret = btrfs_orphan_add(trans, inode);
4961 	if (ret)
4962 		goto out;
4963 
4964 	/* now the directory is empty */
4965 	ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name);
4966 	if (!ret)
4967 		btrfs_i_size_write(inode, 0);
4968 out:
4969 	btrfs_end_transaction(trans);
4970 out_notrans:
4971 	btrfs_btree_balance_dirty(fs_info);
4972 	fscrypt_free_filename(&fname);
4973 
4974 	return ret;
4975 }
4976 
is_inside_block(u64 bytenr,u64 blockstart,u32 blocksize)4977 static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize)
4978 {
4979 	ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u",
4980 		blockstart, blocksize);
4981 
4982 	if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1)
4983 		return true;
4984 	return false;
4985 }
4986 
truncate_block_zero_beyond_eof(struct btrfs_inode * inode,u64 start)4987 static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
4988 {
4989 	const pgoff_t index = (start >> PAGE_SHIFT);
4990 	struct address_space *mapping = inode->vfs_inode.i_mapping;
4991 	struct folio *folio;
4992 	u64 zero_start;
4993 	u64 zero_end;
4994 	int ret = 0;
4995 
4996 again:
4997 	folio = filemap_lock_folio(mapping, index);
4998 	/* No folio present. */
4999 	if (IS_ERR(folio))
5000 		return 0;
5001 
5002 	if (!folio_test_uptodate(folio)) {
5003 		ret = btrfs_read_folio(NULL, folio);
5004 		folio_lock(folio);
5005 		if (folio->mapping != mapping) {
5006 			folio_unlock(folio);
5007 			folio_put(folio);
5008 			goto again;
5009 		}
5010 		if (unlikely(!folio_test_uptodate(folio))) {
5011 			ret = -EIO;
5012 			goto out_unlock;
5013 		}
5014 	}
5015 	folio_wait_writeback(folio);
5016 
5017 	/*
5018 	 * We do not need to lock extents nor wait for OE, as it's already
5019 	 * beyond EOF.
5020 	 */
5021 
5022 	zero_start = max_t(u64, folio_pos(folio), start);
5023 	zero_end = folio_next_pos(folio);
5024 	folio_zero_range(folio, zero_start - folio_pos(folio),
5025 			 zero_end - zero_start);
5026 
5027 out_unlock:
5028 	folio_unlock(folio);
5029 	folio_put(folio);
5030 	return ret;
5031 }
5032 
5033 /*
5034  * Handle the truncation of a fs block.
5035  *
5036  * @inode  - inode that we're zeroing
5037  * @offset - the file offset of the block to truncate
5038  *           The value must be inside [@start, @end], and the function will do
5039  *           extra checks if the block that covers @offset needs to be zeroed.
5040  * @start  - the start file offset of the range we want to zero
5041  * @end    - the end (inclusive) file offset of the range we want to zero.
5042  *
5043  * If the range is not block aligned, read out the folio that covers @offset,
5044  * and if needed zero blocks that are inside the folio and covered by [@start, @end).
5045  * If @start or @end + 1 lands inside a block, that block will be marked dirty
5046  * for writeback.
5047  *
5048  * This is utilized by hole punch, zero range, file expansion.
5049  */
btrfs_truncate_block(struct btrfs_inode * inode,u64 offset,u64 start,u64 end)5050 int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end)
5051 {
5052 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
5053 	struct address_space *mapping = inode->vfs_inode.i_mapping;
5054 	struct extent_io_tree *io_tree = &inode->io_tree;
5055 	struct btrfs_ordered_extent *ordered;
5056 	struct extent_state *cached_state = NULL;
5057 	struct extent_changeset *data_reserved = NULL;
5058 	bool only_release_metadata = false;
5059 	u32 blocksize = fs_info->sectorsize;
5060 	pgoff_t index = (offset >> PAGE_SHIFT);
5061 	struct folio *folio;
5062 	gfp_t mask = btrfs_alloc_write_mask(mapping);
5063 	int ret = 0;
5064 	const bool in_head_block = is_inside_block(offset, round_down(start, blocksize),
5065 						   blocksize);
5066 	const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize),
5067 						   blocksize);
5068 	bool need_truncate_head = false;
5069 	bool need_truncate_tail = false;
5070 	u64 zero_start;
5071 	u64 zero_end;
5072 	u64 block_start;
5073 	u64 block_end;
5074 
5075 	/* @offset should be inside the range. */
5076 	ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu",
5077 	       offset, start, end);
5078 
5079 	/* The range is aligned at both ends. */
5080 	if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) {
5081 		/*
5082 		 * For block size < page size case, we may have polluted blocks
5083 		 * beyond EOF. So we also need to zero them out.
5084 		 */
5085 		if (end == (u64)-1 && blocksize < PAGE_SIZE)
5086 			ret = truncate_block_zero_beyond_eof(inode, start);
5087 		goto out;
5088 	}
5089 
5090 	/*
5091 	 * @offset may not be inside the head nor tail block. In that case we
5092 	 * don't need to do anything.
5093 	 */
5094 	if (!in_head_block && !in_tail_block)
5095 		goto out;
5096 
5097 	/*
5098 	 * Skip the truncation if the range in the target block is already aligned.
5099 	 * The seemingly complex check will also handle the same block case.
5100 	 */
5101 	if (in_head_block && !IS_ALIGNED(start, blocksize))
5102 		need_truncate_head = true;
5103 	if (in_tail_block && !IS_ALIGNED(end + 1, blocksize))
5104 		need_truncate_tail = true;
5105 	if (!need_truncate_head && !need_truncate_tail)
5106 		goto out;
5107 
5108 	block_start = round_down(offset, blocksize);
5109 	block_end = block_start + blocksize - 1;
5110 
5111 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
5112 					  blocksize, false);
5113 	if (ret < 0) {
5114 		size_t write_bytes = blocksize;
5115 
5116 		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
5117 			/* For nocow case, no need to reserve data space. */
5118 			ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u",
5119 			       write_bytes, blocksize);
5120 			only_release_metadata = true;
5121 		} else {
5122 			goto out;
5123 		}
5124 	}
5125 	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
5126 	if (ret < 0) {
5127 		if (!only_release_metadata)
5128 			btrfs_free_reserved_data_space(inode, data_reserved,
5129 						       block_start, blocksize);
5130 		goto out;
5131 	}
5132 again:
5133 	folio = __filemap_get_folio(mapping, index,
5134 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
5135 	if (IS_ERR(folio)) {
5136 		if (only_release_metadata)
5137 			btrfs_delalloc_release_metadata(inode, blocksize, true);
5138 		else
5139 			btrfs_delalloc_release_space(inode, data_reserved,
5140 						     block_start, blocksize, true);
5141 		btrfs_delalloc_release_extents(inode, blocksize);
5142 		ret = PTR_ERR(folio);
5143 		goto out;
5144 	}
5145 
5146 	if (!folio_test_uptodate(folio)) {
5147 		ret = btrfs_read_folio(NULL, folio);
5148 		folio_lock(folio);
5149 		if (folio->mapping != mapping) {
5150 			folio_unlock(folio);
5151 			folio_put(folio);
5152 			goto again;
5153 		}
5154 		if (unlikely(!folio_test_uptodate(folio))) {
5155 			ret = -EIO;
5156 			goto out_unlock;
5157 		}
5158 	}
5159 
5160 	/*
5161 	 * We unlock the page after the io is completed and then re-lock it
5162 	 * above.  release_folio() could have come in between that and cleared
5163 	 * folio private, but left the page in the mapping.  Set the page mapped
5164 	 * here to make sure it's properly set for the subpage stuff.
5165 	 */
5166 	ret = set_folio_extent_mapped(folio);
5167 	if (ret < 0)
5168 		goto out_unlock;
5169 
5170 	folio_wait_writeback(folio);
5171 
5172 	btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
5173 
5174 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
5175 	if (ordered) {
5176 		btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5177 		folio_unlock(folio);
5178 		folio_put(folio);
5179 		btrfs_start_ordered_extent(ordered);
5180 		btrfs_put_ordered_extent(ordered);
5181 		goto again;
5182 	}
5183 
5184 	btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end,
5185 			       EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
5186 			       &cached_state);
5187 
5188 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
5189 					&cached_state);
5190 	if (ret) {
5191 		btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5192 		goto out_unlock;
5193 	}
5194 
5195 	if (end == (u64)-1) {
5196 		/*
5197 		 * We're truncating beyond EOF, the remaining blocks normally are
5198 		 * already holes thus no need to zero again, but it's possible for
5199 		 * fs block size < page size cases to have memory mapped writes
5200 		 * to pollute ranges beyond EOF.
5201 		 *
5202 		 * In that case although such polluted blocks beyond EOF will
5203 		 * not reach disk, it still affects our page caches.
5204 		 */
5205 		zero_start = max_t(u64, folio_pos(folio), start);
5206 		zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
5207 	} else {
5208 		zero_start = max_t(u64, block_start, start);
5209 		zero_end = min_t(u64, block_end, end);
5210 	}
5211 	folio_zero_range(folio, zero_start - folio_pos(folio),
5212 			 zero_end - zero_start + 1);
5213 
5214 	btrfs_folio_clear_checked(fs_info, folio, block_start,
5215 				  block_end + 1 - block_start);
5216 	btrfs_folio_set_dirty(fs_info, folio, block_start,
5217 			      block_end + 1 - block_start);
5218 
5219 	if (only_release_metadata)
5220 		btrfs_set_extent_bit(&inode->io_tree, block_start, block_end,
5221 				     EXTENT_NORESERVE, &cached_state);
5222 
5223 	btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5224 
5225 out_unlock:
5226 	if (ret) {
5227 		if (only_release_metadata)
5228 			btrfs_delalloc_release_metadata(inode, blocksize, true);
5229 		else
5230 			btrfs_delalloc_release_space(inode, data_reserved,
5231 					block_start, blocksize, true);
5232 	}
5233 	btrfs_delalloc_release_extents(inode, blocksize);
5234 	folio_unlock(folio);
5235 	folio_put(folio);
5236 out:
5237 	if (only_release_metadata)
5238 		btrfs_check_nocow_unlock(inode);
5239 	extent_changeset_free(data_reserved);
5240 	return ret;
5241 }
5242 
maybe_insert_hole(struct btrfs_inode * inode,u64 offset,u64 len)5243 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
5244 {
5245 	struct btrfs_root *root = inode->root;
5246 	struct btrfs_fs_info *fs_info = root->fs_info;
5247 	struct btrfs_trans_handle *trans;
5248 	struct btrfs_drop_extents_args drop_args = { 0 };
5249 	int ret;
5250 
5251 	/*
5252 	 * If NO_HOLES is enabled, we don't need to do anything.
5253 	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
5254 	 * or btrfs_update_inode() will be called, which guarantee that the next
5255 	 * fsync will know this inode was changed and needs to be logged.
5256 	 */
5257 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
5258 		return 0;
5259 
5260 	/*
5261 	 * 1 - for the one we're dropping
5262 	 * 1 - for the one we're adding
5263 	 * 1 - for updating the inode.
5264 	 */
5265 	trans = btrfs_start_transaction(root, 3);
5266 	if (IS_ERR(trans))
5267 		return PTR_ERR(trans);
5268 
5269 	drop_args.start = offset;
5270 	drop_args.end = offset + len;
5271 	drop_args.drop_cache = true;
5272 
5273 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
5274 	if (unlikely(ret)) {
5275 		btrfs_abort_transaction(trans, ret);
5276 		btrfs_end_transaction(trans);
5277 		return ret;
5278 	}
5279 
5280 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
5281 	if (ret) {
5282 		btrfs_abort_transaction(trans, ret);
5283 	} else {
5284 		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
5285 		btrfs_update_inode(trans, inode);
5286 	}
5287 	btrfs_end_transaction(trans);
5288 	return ret;
5289 }
5290 
5291 /*
5292  * This function puts in dummy file extents for the area we're creating a hole
5293  * for.  So if we are truncating this file to a larger size we need to insert
5294  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
5295  * the range between oldsize and size
5296  */
btrfs_cont_expand(struct btrfs_inode * inode,loff_t oldsize,loff_t size)5297 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
5298 {
5299 	struct btrfs_root *root = inode->root;
5300 	struct btrfs_fs_info *fs_info = root->fs_info;
5301 	struct extent_io_tree *io_tree = &inode->io_tree;
5302 	struct extent_map *em = NULL;
5303 	struct extent_state *cached_state = NULL;
5304 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5305 	u64 block_end = ALIGN(size, fs_info->sectorsize);
5306 	u64 last_byte;
5307 	u64 cur_offset;
5308 	u64 hole_size;
5309 	int ret = 0;
5310 
5311 	/*
5312 	 * If our size started in the middle of a block we need to zero out the
5313 	 * rest of the block before we expand the i_size, otherwise we could
5314 	 * expose stale data.
5315 	 */
5316 	ret = btrfs_truncate_block(inode, oldsize, oldsize, -1);
5317 	if (ret)
5318 		return ret;
5319 
5320 	if (size <= hole_start)
5321 		return 0;
5322 
5323 	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
5324 					   &cached_state);
5325 	cur_offset = hole_start;
5326 	while (1) {
5327 		em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
5328 		if (IS_ERR(em)) {
5329 			ret = PTR_ERR(em);
5330 			em = NULL;
5331 			break;
5332 		}
5333 		last_byte = min(btrfs_extent_map_end(em), block_end);
5334 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
5335 		hole_size = last_byte - cur_offset;
5336 
5337 		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
5338 			struct extent_map *hole_em;
5339 
5340 			ret = maybe_insert_hole(inode, cur_offset, hole_size);
5341 			if (ret)
5342 				break;
5343 
5344 			ret = btrfs_inode_set_file_extent_range(inode,
5345 							cur_offset, hole_size);
5346 			if (ret)
5347 				break;
5348 
5349 			hole_em = btrfs_alloc_extent_map();
5350 			if (!hole_em) {
5351 				btrfs_drop_extent_map_range(inode, cur_offset,
5352 						    cur_offset + hole_size - 1,
5353 						    false);
5354 				btrfs_set_inode_full_sync(inode);
5355 				goto next;
5356 			}
5357 			hole_em->start = cur_offset;
5358 			hole_em->len = hole_size;
5359 
5360 			hole_em->disk_bytenr = EXTENT_MAP_HOLE;
5361 			hole_em->disk_num_bytes = 0;
5362 			hole_em->ram_bytes = hole_size;
5363 			hole_em->generation = btrfs_get_fs_generation(fs_info);
5364 
5365 			ret = btrfs_replace_extent_map_range(inode, hole_em, true);
5366 			btrfs_free_extent_map(hole_em);
5367 		} else {
5368 			ret = btrfs_inode_set_file_extent_range(inode,
5369 							cur_offset, hole_size);
5370 			if (ret)
5371 				break;
5372 		}
5373 next:
5374 		btrfs_free_extent_map(em);
5375 		em = NULL;
5376 		cur_offset = last_byte;
5377 		if (cur_offset >= block_end)
5378 			break;
5379 	}
5380 	btrfs_free_extent_map(em);
5381 	btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
5382 	return ret;
5383 }
5384 
btrfs_setsize(struct inode * inode,struct iattr * attr)5385 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5386 {
5387 	struct btrfs_root *root = BTRFS_I(inode)->root;
5388 	struct btrfs_trans_handle *trans;
5389 	loff_t oldsize = i_size_read(inode);
5390 	loff_t newsize = attr->ia_size;
5391 	int mask = attr->ia_valid;
5392 	int ret;
5393 
5394 	/*
5395 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5396 	 * special case where we need to update the times despite not having
5397 	 * these flags set.  For all other operations the VFS set these flags
5398 	 * explicitly if it wants a timestamp update.
5399 	 */
5400 	if (newsize != oldsize) {
5401 		inode_inc_iversion(inode);
5402 		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
5403 			inode_set_mtime_to_ts(inode,
5404 					      inode_set_ctime_current(inode));
5405 		}
5406 	}
5407 
5408 	if (newsize > oldsize) {
5409 		/*
5410 		 * Don't do an expanding truncate while snapshotting is ongoing.
5411 		 * This is to ensure the snapshot captures a fully consistent
5412 		 * state of this file - if the snapshot captures this expanding
5413 		 * truncation, it must capture all writes that happened before
5414 		 * this truncation.
5415 		 */
5416 		btrfs_drew_write_lock(&root->snapshot_lock);
5417 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5418 		if (ret) {
5419 			btrfs_drew_write_unlock(&root->snapshot_lock);
5420 			return ret;
5421 		}
5422 
5423 		trans = btrfs_start_transaction(root, 1);
5424 		if (IS_ERR(trans)) {
5425 			btrfs_drew_write_unlock(&root->snapshot_lock);
5426 			return PTR_ERR(trans);
5427 		}
5428 
5429 		i_size_write(inode, newsize);
5430 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5431 		pagecache_isize_extended(inode, oldsize, newsize);
5432 		ret = btrfs_update_inode(trans, BTRFS_I(inode));
5433 		btrfs_drew_write_unlock(&root->snapshot_lock);
5434 		btrfs_end_transaction(trans);
5435 	} else {
5436 		struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
5437 
5438 		if (btrfs_is_zoned(fs_info)) {
5439 			ret = btrfs_wait_ordered_range(BTRFS_I(inode),
5440 					ALIGN(newsize, fs_info->sectorsize),
5441 					(u64)-1);
5442 			if (ret)
5443 				return ret;
5444 		}
5445 
5446 		/*
5447 		 * We're truncating a file that used to have good data down to
5448 		 * zero. Make sure any new writes to the file get on disk
5449 		 * on close.
5450 		 */
5451 		if (newsize == 0)
5452 			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5453 				&BTRFS_I(inode)->runtime_flags);
5454 
5455 		truncate_setsize(inode, newsize);
5456 
5457 		inode_dio_wait(inode);
5458 
5459 		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
5460 		if (ret && inode->i_nlink) {
5461 			int ret2;
5462 
5463 			/*
5464 			 * Truncate failed, so fix up the in-memory size. We
5465 			 * adjusted disk_i_size down as we removed extents, so
5466 			 * wait for disk_i_size to be stable and then update the
5467 			 * in-memory size to match.
5468 			 */
5469 			ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
5470 			if (ret2)
5471 				return ret2;
5472 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5473 		}
5474 	}
5475 
5476 	return ret;
5477 }
5478 
btrfs_setattr(struct mnt_idmap * idmap,struct dentry * dentry,struct iattr * attr)5479 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5480 			 struct iattr *attr)
5481 {
5482 	struct inode *inode = d_inode(dentry);
5483 	struct btrfs_root *root = BTRFS_I(inode)->root;
5484 	int ret;
5485 
5486 	if (btrfs_root_readonly(root))
5487 		return -EROFS;
5488 
5489 	ret = setattr_prepare(idmap, dentry, attr);
5490 	if (ret)
5491 		return ret;
5492 
5493 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5494 		ret = btrfs_setsize(inode, attr);
5495 		if (ret)
5496 			return ret;
5497 	}
5498 
5499 	if (attr->ia_valid) {
5500 		setattr_copy(idmap, inode, attr);
5501 		inode_inc_iversion(inode);
5502 		ret = btrfs_dirty_inode(BTRFS_I(inode));
5503 
5504 		if (!ret && attr->ia_valid & ATTR_MODE)
5505 			ret = posix_acl_chmod(idmap, dentry, inode->i_mode);
5506 	}
5507 
5508 	return ret;
5509 }
5510 
5511 /*
5512  * While truncating the inode pages during eviction, we get the VFS
5513  * calling btrfs_invalidate_folio() against each folio of the inode. This
5514  * is slow because the calls to btrfs_invalidate_folio() result in a
5515  * huge amount of calls to lock_extent() and clear_extent_bit(),
5516  * which keep merging and splitting extent_state structures over and over,
5517  * wasting lots of time.
5518  *
5519  * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5520  * skip all those expensive operations on a per folio basis and do only
5521  * the ordered io finishing, while we release here the extent_map and
5522  * extent_state structures, without the excessive merging and splitting.
5523  */
evict_inode_truncate_pages(struct inode * inode)5524 static void evict_inode_truncate_pages(struct inode *inode)
5525 {
5526 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5527 	struct rb_node *node;
5528 
5529 	ASSERT(inode_state_read_once(inode) & I_FREEING);
5530 	truncate_inode_pages_final(&inode->i_data);
5531 
5532 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5533 
5534 	/*
5535 	 * Keep looping until we have no more ranges in the io tree.
5536 	 * We can have ongoing bios started by readahead that have
5537 	 * their endio callback (extent_io.c:end_bio_extent_readpage)
5538 	 * still in progress (unlocked the pages in the bio but did not yet
5539 	 * unlocked the ranges in the io tree). Therefore this means some
5540 	 * ranges can still be locked and eviction started because before
5541 	 * submitting those bios, which are executed by a separate task (work
5542 	 * queue kthread), inode references (inode->i_count) were not taken
5543 	 * (which would be dropped in the end io callback of each bio).
5544 	 * Therefore here we effectively end up waiting for those bios and
5545 	 * anyone else holding locked ranges without having bumped the inode's
5546 	 * reference count - if we don't do it, when they access the inode's
5547 	 * io_tree to unlock a range it may be too late, leading to an
5548 	 * use-after-free issue.
5549 	 */
5550 	spin_lock(&io_tree->lock);
5551 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5552 		struct extent_state *state;
5553 		struct extent_state *cached_state = NULL;
5554 		u64 start;
5555 		u64 end;
5556 		unsigned state_flags;
5557 
5558 		node = rb_first(&io_tree->state);
5559 		state = rb_entry(node, struct extent_state, rb_node);
5560 		start = state->start;
5561 		end = state->end;
5562 		state_flags = state->state;
5563 		spin_unlock(&io_tree->lock);
5564 
5565 		btrfs_lock_extent(io_tree, start, end, &cached_state);
5566 
5567 		/*
5568 		 * If still has DELALLOC flag, the extent didn't reach disk,
5569 		 * and its reserved space won't be freed by delayed_ref.
5570 		 * So we need to free its reserved space here.
5571 		 * (Refer to comment in btrfs_invalidate_folio, case 2)
5572 		 *
5573 		 * Note, end is the bytenr of last byte, so we need + 1 here.
5574 		 */
5575 		if (state_flags & EXTENT_DELALLOC)
5576 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5577 					       end - start + 1, NULL);
5578 
5579 		btrfs_clear_extent_bit(io_tree, start, end,
5580 				       EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5581 				       &cached_state);
5582 
5583 		cond_resched();
5584 		spin_lock(&io_tree->lock);
5585 	}
5586 	spin_unlock(&io_tree->lock);
5587 }
5588 
evict_refill_and_join(struct btrfs_root * root,struct btrfs_block_rsv * rsv)5589 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5590 							struct btrfs_block_rsv *rsv)
5591 {
5592 	struct btrfs_fs_info *fs_info = root->fs_info;
5593 	struct btrfs_trans_handle *trans;
5594 	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
5595 	int ret;
5596 
5597 	/*
5598 	 * Eviction should be taking place at some place safe because of our
5599 	 * delayed iputs.  However the normal flushing code will run delayed
5600 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5601 	 *
5602 	 * We reserve the delayed_refs_extra here again because we can't use
5603 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5604 	 * above.  We reserve our extra bit here because we generate a ton of
5605 	 * delayed refs activity by truncating.
5606 	 *
5607 	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5608 	 * if we fail to make this reservation we can re-try without the
5609 	 * delayed_refs_extra so we can make some forward progress.
5610 	 */
5611 	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5612 				     BTRFS_RESERVE_FLUSH_EVICT);
5613 	if (ret) {
5614 		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5615 					     BTRFS_RESERVE_FLUSH_EVICT);
5616 		if (ret) {
5617 			btrfs_warn(fs_info,
5618 				   "could not allocate space for delete; will truncate on mount");
5619 			return ERR_PTR(-ENOSPC);
5620 		}
5621 		delayed_refs_extra = 0;
5622 	}
5623 
5624 	trans = btrfs_join_transaction(root);
5625 	if (IS_ERR(trans))
5626 		return trans;
5627 
5628 	if (delayed_refs_extra) {
5629 		trans->block_rsv = &fs_info->trans_block_rsv;
5630 		trans->bytes_reserved = delayed_refs_extra;
5631 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5632 					delayed_refs_extra, true);
5633 	}
5634 	return trans;
5635 }
5636 
btrfs_evict_inode(struct inode * inode)5637 void btrfs_evict_inode(struct inode *inode)
5638 {
5639 	struct btrfs_fs_info *fs_info;
5640 	struct btrfs_trans_handle *trans;
5641 	struct btrfs_root *root = BTRFS_I(inode)->root;
5642 	struct btrfs_block_rsv rsv;
5643 	int ret;
5644 
5645 	trace_btrfs_inode_evict(inode);
5646 
5647 	if (!root)
5648 		goto clear_inode;
5649 
5650 	fs_info = inode_to_fs_info(inode);
5651 	evict_inode_truncate_pages(inode);
5652 
5653 	if (inode->i_nlink &&
5654 	    ((btrfs_root_refs(&root->root_item) != 0 &&
5655 	      btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
5656 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5657 		goto out;
5658 
5659 	if (is_bad_inode(inode))
5660 		goto out;
5661 
5662 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5663 		goto out;
5664 
5665 	if (inode->i_nlink > 0) {
5666 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5667 		       btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
5668 		goto out;
5669 	}
5670 
5671 	/*
5672 	 * This makes sure the inode item in tree is uptodate and the space for
5673 	 * the inode update is released.
5674 	 */
5675 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5676 	if (ret)
5677 		goto out;
5678 
5679 	/*
5680 	 * This drops any pending insert or delete operations we have for this
5681 	 * inode.  We could have a delayed dir index deletion queued up, but
5682 	 * we're removing the inode completely so that'll be taken care of in
5683 	 * the truncate.
5684 	 */
5685 	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5686 
5687 	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
5688 	rsv.size = btrfs_calc_metadata_size(fs_info, 1);
5689 	rsv.failfast = true;
5690 
5691 	btrfs_i_size_write(BTRFS_I(inode), 0);
5692 
5693 	while (1) {
5694 		struct btrfs_truncate_control control = {
5695 			.inode = BTRFS_I(inode),
5696 			.ino = btrfs_ino(BTRFS_I(inode)),
5697 			.new_size = 0,
5698 			.min_type = 0,
5699 		};
5700 
5701 		trans = evict_refill_and_join(root, &rsv);
5702 		if (IS_ERR(trans))
5703 			goto out_release;
5704 
5705 		trans->block_rsv = &rsv;
5706 
5707 		ret = btrfs_truncate_inode_items(trans, root, &control);
5708 		trans->block_rsv = &fs_info->trans_block_rsv;
5709 		btrfs_end_transaction(trans);
5710 		/*
5711 		 * We have not added new delayed items for our inode after we
5712 		 * have flushed its delayed items, so no need to throttle on
5713 		 * delayed items. However we have modified extent buffers.
5714 		 */
5715 		btrfs_btree_balance_dirty_nodelay(fs_info);
5716 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5717 			goto out_release;
5718 		else if (!ret)
5719 			break;
5720 	}
5721 
5722 	/*
5723 	 * Errors here aren't a big deal, it just means we leave orphan items in
5724 	 * the tree. They will be cleaned up on the next mount. If the inode
5725 	 * number gets reused, cleanup deletes the orphan item without doing
5726 	 * anything, and unlink reuses the existing orphan item.
5727 	 *
5728 	 * If it turns out that we are dropping too many of these, we might want
5729 	 * to add a mechanism for retrying these after a commit.
5730 	 */
5731 	trans = evict_refill_and_join(root, &rsv);
5732 	if (!IS_ERR(trans)) {
5733 		trans->block_rsv = &rsv;
5734 		btrfs_orphan_del(trans, BTRFS_I(inode));
5735 		trans->block_rsv = &fs_info->trans_block_rsv;
5736 		btrfs_end_transaction(trans);
5737 	}
5738 
5739 out_release:
5740 	btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
5741 out:
5742 	/*
5743 	 * If we didn't successfully delete, the orphan item will still be in
5744 	 * the tree and we'll retry on the next mount. Again, we might also want
5745 	 * to retry these periodically in the future.
5746 	 */
5747 	btrfs_remove_delayed_node(BTRFS_I(inode));
5748 clear_inode:
5749 	clear_inode(inode);
5750 }
5751 
5752 /*
5753  * Return the key found in the dir entry in the location pointer, fill @type
5754  * with BTRFS_FT_*, and return 0.
5755  *
5756  * If no dir entries were found, returns -ENOENT.
5757  * If found a corrupted location in dir entry, returns -EUCLEAN.
5758  */
btrfs_inode_by_name(struct btrfs_inode * dir,struct dentry * dentry,struct btrfs_key * location,u8 * type)5759 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
5760 			       struct btrfs_key *location, u8 *type)
5761 {
5762 	struct btrfs_dir_item *di;
5763 	BTRFS_PATH_AUTO_FREE(path);
5764 	struct btrfs_root *root = dir->root;
5765 	int ret = 0;
5766 	struct fscrypt_name fname;
5767 
5768 	path = btrfs_alloc_path();
5769 	if (!path)
5770 		return -ENOMEM;
5771 
5772 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
5773 	if (ret < 0)
5774 		return ret;
5775 	/*
5776 	 * fscrypt_setup_filename() should never return a positive value, but
5777 	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
5778 	 */
5779 	ASSERT(ret == 0);
5780 
5781 	/* This needs to handle no-key deletions later on */
5782 
5783 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
5784 				   &fname.disk_name, 0);
5785 	if (IS_ERR_OR_NULL(di)) {
5786 		ret = di ? PTR_ERR(di) : -ENOENT;
5787 		goto out;
5788 	}
5789 
5790 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5791 	if (unlikely(location->type != BTRFS_INODE_ITEM_KEY &&
5792 		     location->type != BTRFS_ROOT_ITEM_KEY)) {
5793 		ret = -EUCLEAN;
5794 		btrfs_warn(root->fs_info,
5795 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location " BTRFS_KEY_FMT ")",
5796 			   __func__, fname.disk_name.name, btrfs_ino(dir),
5797 			   BTRFS_KEY_FMT_VALUE(location));
5798 	}
5799 	if (!ret)
5800 		*type = btrfs_dir_ftype(path->nodes[0], di);
5801 out:
5802 	fscrypt_free_filename(&fname);
5803 	return ret;
5804 }
5805 
5806 /*
5807  * when we hit a tree root in a directory, the btrfs part of the inode
5808  * needs to be changed to reflect the root directory of the tree root.  This
5809  * is kind of like crossing a mount point.
5810  */
fixup_tree_root_location(struct btrfs_fs_info * fs_info,struct btrfs_inode * dir,struct dentry * dentry,struct btrfs_key * location,struct btrfs_root ** sub_root)5811 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5812 				    struct btrfs_inode *dir,
5813 				    struct dentry *dentry,
5814 				    struct btrfs_key *location,
5815 				    struct btrfs_root **sub_root)
5816 {
5817 	BTRFS_PATH_AUTO_FREE(path);
5818 	struct btrfs_root *new_root;
5819 	struct btrfs_root_ref *ref;
5820 	struct extent_buffer *leaf;
5821 	struct btrfs_key key;
5822 	int ret;
5823 	int err = 0;
5824 	struct fscrypt_name fname;
5825 
5826 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
5827 	if (ret)
5828 		return ret;
5829 
5830 	path = btrfs_alloc_path();
5831 	if (!path) {
5832 		err = -ENOMEM;
5833 		goto out;
5834 	}
5835 
5836 	err = -ENOENT;
5837 	key.objectid = btrfs_root_id(dir->root);
5838 	key.type = BTRFS_ROOT_REF_KEY;
5839 	key.offset = location->objectid;
5840 
5841 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5842 	if (ret) {
5843 		if (ret < 0)
5844 			err = ret;
5845 		goto out;
5846 	}
5847 
5848 	leaf = path->nodes[0];
5849 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5850 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5851 	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
5852 		goto out;
5853 
5854 	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
5855 				   (unsigned long)(ref + 1), fname.disk_name.len);
5856 	if (ret)
5857 		goto out;
5858 
5859 	btrfs_release_path(path);
5860 
5861 	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5862 	if (IS_ERR(new_root)) {
5863 		err = PTR_ERR(new_root);
5864 		goto out;
5865 	}
5866 
5867 	*sub_root = new_root;
5868 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5869 	location->type = BTRFS_INODE_ITEM_KEY;
5870 	location->offset = 0;
5871 	err = 0;
5872 out:
5873 	fscrypt_free_filename(&fname);
5874 	return err;
5875 }
5876 
5877 
5878 
btrfs_del_inode_from_root(struct btrfs_inode * inode)5879 static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
5880 {
5881 	struct btrfs_root *root = inode->root;
5882 	struct btrfs_inode *entry;
5883 	bool empty = false;
5884 
5885 	xa_lock(&root->inodes);
5886 	/*
5887 	 * This btrfs_inode is being freed and has already been unhashed at this
5888 	 * point. It's possible that another btrfs_inode has already been
5889 	 * allocated for the same inode and inserted itself into the root, so
5890 	 * don't delete it in that case.
5891 	 *
5892 	 * Note that this shouldn't need to allocate memory, so the gfp flags
5893 	 * don't really matter.
5894 	 */
5895 	entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
5896 			     GFP_ATOMIC);
5897 	if (entry == inode)
5898 		empty = xa_empty(&root->inodes);
5899 	xa_unlock(&root->inodes);
5900 
5901 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5902 		xa_lock(&root->inodes);
5903 		empty = xa_empty(&root->inodes);
5904 		xa_unlock(&root->inodes);
5905 		if (empty)
5906 			btrfs_add_dead_root(root);
5907 	}
5908 }
5909 
5910 
btrfs_init_locked_inode(struct inode * inode,void * p)5911 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5912 {
5913 	struct btrfs_iget_args *args = p;
5914 
5915 	btrfs_set_inode_number(BTRFS_I(inode), args->ino);
5916 	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5917 
5918 	if (args->root && args->root == args->root->fs_info->tree_root &&
5919 	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
5920 		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5921 			&BTRFS_I(inode)->runtime_flags);
5922 	return 0;
5923 }
5924 
btrfs_find_actor(struct inode * inode,void * opaque)5925 static int btrfs_find_actor(struct inode *inode, void *opaque)
5926 {
5927 	struct btrfs_iget_args *args = opaque;
5928 
5929 	return args->ino == btrfs_ino(BTRFS_I(inode)) &&
5930 		args->root == BTRFS_I(inode)->root;
5931 }
5932 
btrfs_iget_locked(u64 ino,struct btrfs_root * root)5933 static struct btrfs_inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
5934 {
5935 	struct inode *inode;
5936 	struct btrfs_iget_args args;
5937 	unsigned long hashval = btrfs_inode_hash(ino, root);
5938 
5939 	args.ino = ino;
5940 	args.root = root;
5941 
5942 	inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
5943 			     btrfs_init_locked_inode,
5944 			     (void *)&args);
5945 	if (!inode)
5946 		return NULL;
5947 	return BTRFS_I(inode);
5948 }
5949 
5950 /*
5951  * Get an inode object given its inode number and corresponding root.  Path is
5952  * preallocated to prevent recursing back to iget through allocator.
5953  */
btrfs_iget_path(u64 ino,struct btrfs_root * root,struct btrfs_path * path)5954 struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
5955 				    struct btrfs_path *path)
5956 {
5957 	struct btrfs_inode *inode;
5958 	int ret;
5959 
5960 	inode = btrfs_iget_locked(ino, root);
5961 	if (!inode)
5962 		return ERR_PTR(-ENOMEM);
5963 
5964 	if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
5965 		return inode;
5966 
5967 	ret = btrfs_read_locked_inode(inode, path);
5968 	if (ret)
5969 		return ERR_PTR(ret);
5970 
5971 	unlock_new_inode(&inode->vfs_inode);
5972 	return inode;
5973 }
5974 
5975 /*
5976  * Get an inode object given its inode number and corresponding root.
5977  */
btrfs_iget(u64 ino,struct btrfs_root * root)5978 struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
5979 {
5980 	struct btrfs_inode *inode;
5981 	struct btrfs_path *path;
5982 	int ret;
5983 
5984 	inode = btrfs_iget_locked(ino, root);
5985 	if (!inode)
5986 		return ERR_PTR(-ENOMEM);
5987 
5988 	if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
5989 		return inode;
5990 
5991 	path = btrfs_alloc_path();
5992 	if (!path) {
5993 		iget_failed(&inode->vfs_inode);
5994 		return ERR_PTR(-ENOMEM);
5995 	}
5996 
5997 	ret = btrfs_read_locked_inode(inode, path);
5998 	btrfs_free_path(path);
5999 	if (ret)
6000 		return ERR_PTR(ret);
6001 
6002 	if (S_ISDIR(inode->vfs_inode.i_mode))
6003 		inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC;
6004 	unlock_new_inode(&inode->vfs_inode);
6005 	return inode;
6006 }
6007 
new_simple_dir(struct inode * dir,struct btrfs_key * key,struct btrfs_root * root)6008 static struct btrfs_inode *new_simple_dir(struct inode *dir,
6009 					  struct btrfs_key *key,
6010 					  struct btrfs_root *root)
6011 {
6012 	struct timespec64 ts;
6013 	struct inode *vfs_inode;
6014 	struct btrfs_inode *inode;
6015 
6016 	vfs_inode = new_inode(dir->i_sb);
6017 	if (!vfs_inode)
6018 		return ERR_PTR(-ENOMEM);
6019 
6020 	inode = BTRFS_I(vfs_inode);
6021 	inode->root = btrfs_grab_root(root);
6022 	inode->ref_root_id = key->objectid;
6023 	set_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags);
6024 	set_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags);
6025 
6026 	btrfs_set_inode_number(inode, BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
6027 	/*
6028 	 * We only need lookup, the rest is read-only and there's no inode
6029 	 * associated with the dentry
6030 	 */
6031 	vfs_inode->i_op = &simple_dir_inode_operations;
6032 	vfs_inode->i_opflags &= ~IOP_XATTR;
6033 	vfs_inode->i_fop = &simple_dir_operations;
6034 	vfs_inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
6035 
6036 	ts = inode_set_ctime_current(vfs_inode);
6037 	inode_set_mtime_to_ts(vfs_inode, ts);
6038 	inode_set_atime_to_ts(vfs_inode, inode_get_atime(dir));
6039 	inode->i_otime_sec = ts.tv_sec;
6040 	inode->i_otime_nsec = ts.tv_nsec;
6041 
6042 	vfs_inode->i_uid = dir->i_uid;
6043 	vfs_inode->i_gid = dir->i_gid;
6044 
6045 	return inode;
6046 }
6047 
6048 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
6049 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
6050 static_assert(BTRFS_FT_DIR == FT_DIR);
6051 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
6052 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
6053 static_assert(BTRFS_FT_FIFO == FT_FIFO);
6054 static_assert(BTRFS_FT_SOCK == FT_SOCK);
6055 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
6056 
btrfs_inode_type(const struct btrfs_inode * inode)6057 static inline u8 btrfs_inode_type(const struct btrfs_inode *inode)
6058 {
6059 	return fs_umode_to_ftype(inode->vfs_inode.i_mode);
6060 }
6061 
btrfs_lookup_dentry(struct inode * dir,struct dentry * dentry)6062 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
6063 {
6064 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6065 	struct btrfs_inode *inode;
6066 	struct btrfs_root *root = BTRFS_I(dir)->root;
6067 	struct btrfs_root *sub_root = root;
6068 	struct btrfs_key location = { 0 };
6069 	u8 di_type = 0;
6070 	int ret = 0;
6071 
6072 	if (dentry->d_name.len > BTRFS_NAME_LEN)
6073 		return ERR_PTR(-ENAMETOOLONG);
6074 
6075 	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
6076 	if (ret < 0)
6077 		return ERR_PTR(ret);
6078 
6079 	if (location.type == BTRFS_INODE_ITEM_KEY) {
6080 		inode = btrfs_iget(location.objectid, root);
6081 		if (IS_ERR(inode))
6082 			return ERR_CAST(inode);
6083 
6084 		/* Do extra check against inode mode with di_type */
6085 		if (unlikely(btrfs_inode_type(inode) != di_type)) {
6086 			btrfs_crit(fs_info,
6087 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
6088 				  inode->vfs_inode.i_mode, btrfs_inode_type(inode),
6089 				  di_type);
6090 			iput(&inode->vfs_inode);
6091 			return ERR_PTR(-EUCLEAN);
6092 		}
6093 		return &inode->vfs_inode;
6094 	}
6095 
6096 	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
6097 				       &location, &sub_root);
6098 	if (ret < 0) {
6099 		if (ret != -ENOENT)
6100 			inode = ERR_PTR(ret);
6101 		else
6102 			inode = new_simple_dir(dir, &location, root);
6103 	} else {
6104 		inode = btrfs_iget(location.objectid, sub_root);
6105 		btrfs_put_root(sub_root);
6106 
6107 		if (IS_ERR(inode))
6108 			return ERR_CAST(inode);
6109 
6110 		down_read(&fs_info->cleanup_work_sem);
6111 		if (!sb_rdonly(inode->vfs_inode.i_sb))
6112 			ret = btrfs_orphan_cleanup(sub_root);
6113 		up_read(&fs_info->cleanup_work_sem);
6114 		if (ret) {
6115 			iput(&inode->vfs_inode);
6116 			inode = ERR_PTR(ret);
6117 		}
6118 	}
6119 
6120 	if (IS_ERR(inode))
6121 		return ERR_CAST(inode);
6122 
6123 	return &inode->vfs_inode;
6124 }
6125 
btrfs_dentry_delete(const struct dentry * dentry)6126 static int btrfs_dentry_delete(const struct dentry *dentry)
6127 {
6128 	struct btrfs_root *root;
6129 	struct inode *inode = d_inode(dentry);
6130 
6131 	if (!inode && !IS_ROOT(dentry))
6132 		inode = d_inode(dentry->d_parent);
6133 
6134 	if (inode) {
6135 		root = BTRFS_I(inode)->root;
6136 		if (btrfs_root_refs(&root->root_item) == 0)
6137 			return 1;
6138 
6139 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6140 			return 1;
6141 	}
6142 	return 0;
6143 }
6144 
btrfs_lookup(struct inode * dir,struct dentry * dentry,unsigned int flags)6145 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
6146 				   unsigned int flags)
6147 {
6148 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
6149 
6150 	if (inode == ERR_PTR(-ENOENT))
6151 		inode = NULL;
6152 	return d_splice_alias(inode, dentry);
6153 }
6154 
6155 /*
6156  * Find the highest existing sequence number in a directory and then set the
6157  * in-memory index_cnt variable to the first free sequence number.
6158  */
btrfs_set_inode_index_count(struct btrfs_inode * inode)6159 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6160 {
6161 	struct btrfs_root *root = inode->root;
6162 	struct btrfs_key key, found_key;
6163 	BTRFS_PATH_AUTO_FREE(path);
6164 	struct extent_buffer *leaf;
6165 	int ret;
6166 
6167 	key.objectid = btrfs_ino(inode);
6168 	key.type = BTRFS_DIR_INDEX_KEY;
6169 	key.offset = (u64)-1;
6170 
6171 	path = btrfs_alloc_path();
6172 	if (!path)
6173 		return -ENOMEM;
6174 
6175 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6176 	if (ret < 0)
6177 		return ret;
6178 
6179 	if (unlikely(ret == 0)) {
6180 		/*
6181 		 * Key with offset -1 found, there would have to exist a dir
6182 		 * index item with such offset, but this is out of the valid
6183 		 * range.
6184 		 */
6185 		btrfs_err(root->fs_info,
6186 			  "unexpected exact match for DIR_INDEX key, inode %llu",
6187 			  btrfs_ino(inode));
6188 		return -EUCLEAN;
6189 	}
6190 
6191 	if (path->slots[0] == 0) {
6192 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6193 		return 0;
6194 	}
6195 
6196 	path->slots[0]--;
6197 
6198 	leaf = path->nodes[0];
6199 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6200 
6201 	if (found_key.objectid != btrfs_ino(inode) ||
6202 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
6203 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6204 		return 0;
6205 	}
6206 
6207 	inode->index_cnt = found_key.offset + 1;
6208 
6209 	return 0;
6210 }
6211 
btrfs_get_dir_last_index(struct btrfs_inode * dir,u64 * index)6212 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
6213 {
6214 	int ret = 0;
6215 
6216 	btrfs_inode_lock(dir, 0);
6217 	if (dir->index_cnt == (u64)-1) {
6218 		ret = btrfs_inode_delayed_dir_index_count(dir);
6219 		if (ret) {
6220 			ret = btrfs_set_inode_index_count(dir);
6221 			if (ret)
6222 				goto out;
6223 		}
6224 	}
6225 
6226 	/* index_cnt is the index number of next new entry, so decrement it. */
6227 	*index = dir->index_cnt - 1;
6228 out:
6229 	btrfs_inode_unlock(dir, 0);
6230 
6231 	return ret;
6232 }
6233 
6234 /*
6235  * All this infrastructure exists because dir_emit can fault, and we are holding
6236  * the tree lock when doing readdir.  For now just allocate a buffer and copy
6237  * our information into that, and then dir_emit from the buffer.  This is
6238  * similar to what NFS does, only we don't keep the buffer around in pagecache
6239  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
6240  * copy_to_user_inatomic so we don't have to worry about page faulting under the
6241  * tree lock.
6242  */
btrfs_opendir(struct inode * inode,struct file * file)6243 static int btrfs_opendir(struct inode *inode, struct file *file)
6244 {
6245 	struct btrfs_file_private *private;
6246 	u64 last_index;
6247 	int ret;
6248 
6249 	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
6250 	if (ret)
6251 		return ret;
6252 
6253 	private = kzalloc_obj(struct btrfs_file_private);
6254 	if (!private)
6255 		return -ENOMEM;
6256 	private->last_index = last_index;
6257 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
6258 	if (!private->filldir_buf) {
6259 		kfree(private);
6260 		return -ENOMEM;
6261 	}
6262 	file->private_data = private;
6263 	return 0;
6264 }
6265 
btrfs_dir_llseek(struct file * file,loff_t offset,int whence)6266 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
6267 {
6268 	struct btrfs_file_private *private = file->private_data;
6269 	int ret;
6270 
6271 	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
6272 				       &private->last_index);
6273 	if (ret)
6274 		return ret;
6275 
6276 	return generic_file_llseek(file, offset, whence);
6277 }
6278 
6279 struct dir_entry {
6280 	u64 ino;
6281 	u64 offset;
6282 	unsigned type;
6283 	int name_len;
6284 };
6285 
btrfs_filldir(void * addr,int entries,struct dir_context * ctx)6286 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
6287 {
6288 	while (entries--) {
6289 		struct dir_entry *entry = addr;
6290 		char *name = (char *)(entry + 1);
6291 
6292 		ctx->pos = get_unaligned(&entry->offset);
6293 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
6294 					 get_unaligned(&entry->ino),
6295 					 get_unaligned(&entry->type)))
6296 			return 1;
6297 		addr += sizeof(struct dir_entry) +
6298 			get_unaligned(&entry->name_len);
6299 		ctx->pos++;
6300 	}
6301 	return 0;
6302 }
6303 
btrfs_real_readdir(struct file * file,struct dir_context * ctx)6304 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
6305 {
6306 	struct inode *inode = file_inode(file);
6307 	struct btrfs_root *root = BTRFS_I(inode)->root;
6308 	struct btrfs_file_private *private = file->private_data;
6309 	struct btrfs_dir_item *di;
6310 	struct btrfs_key key;
6311 	struct btrfs_key found_key;
6312 	BTRFS_PATH_AUTO_FREE(path);
6313 	void *addr;
6314 	LIST_HEAD(ins_list);
6315 	LIST_HEAD(del_list);
6316 	int ret;
6317 	char *name_ptr;
6318 	int name_len;
6319 	int entries = 0;
6320 	int total_len = 0;
6321 	bool put = false;
6322 	struct btrfs_key location;
6323 
6324 	if (!dir_emit_dots(file, ctx))
6325 		return 0;
6326 
6327 	path = btrfs_alloc_path();
6328 	if (!path)
6329 		return -ENOMEM;
6330 
6331 	addr = private->filldir_buf;
6332 	path->reada = READA_FORWARD;
6333 
6334 	put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
6335 					      &ins_list, &del_list);
6336 
6337 again:
6338 	key.type = BTRFS_DIR_INDEX_KEY;
6339 	key.offset = ctx->pos;
6340 	key.objectid = btrfs_ino(BTRFS_I(inode));
6341 
6342 	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
6343 		struct dir_entry *entry;
6344 		struct extent_buffer *leaf = path->nodes[0];
6345 		u8 ftype;
6346 
6347 		if (found_key.objectid != key.objectid)
6348 			break;
6349 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
6350 			break;
6351 		if (found_key.offset < ctx->pos)
6352 			continue;
6353 		if (found_key.offset > private->last_index)
6354 			break;
6355 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
6356 			continue;
6357 		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
6358 		name_len = btrfs_dir_name_len(leaf, di);
6359 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
6360 		    PAGE_SIZE) {
6361 			btrfs_release_path(path);
6362 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6363 			if (ret)
6364 				goto nopos;
6365 			addr = private->filldir_buf;
6366 			entries = 0;
6367 			total_len = 0;
6368 			goto again;
6369 		}
6370 
6371 		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
6372 		entry = addr;
6373 		name_ptr = (char *)(entry + 1);
6374 		read_extent_buffer(leaf, name_ptr,
6375 				   (unsigned long)(di + 1), name_len);
6376 		put_unaligned(name_len, &entry->name_len);
6377 		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
6378 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
6379 		put_unaligned(location.objectid, &entry->ino);
6380 		put_unaligned(found_key.offset, &entry->offset);
6381 		entries++;
6382 		addr += sizeof(struct dir_entry) + name_len;
6383 		total_len += sizeof(struct dir_entry) + name_len;
6384 	}
6385 	/* Catch error encountered during iteration */
6386 	if (ret < 0)
6387 		goto err;
6388 
6389 	btrfs_release_path(path);
6390 
6391 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6392 	if (ret)
6393 		goto nopos;
6394 
6395 	if (btrfs_readdir_delayed_dir_index(ctx, &ins_list))
6396 		goto nopos;
6397 
6398 	/*
6399 	 * Stop new entries from being returned after we return the last
6400 	 * entry.
6401 	 *
6402 	 * New directory entries are assigned a strictly increasing
6403 	 * offset.  This means that new entries created during readdir
6404 	 * are *guaranteed* to be seen in the future by that readdir.
6405 	 * This has broken buggy programs which operate on names as
6406 	 * they're returned by readdir.  Until we reuse freed offsets
6407 	 * we have this hack to stop new entries from being returned
6408 	 * under the assumption that they'll never reach this huge
6409 	 * offset.
6410 	 *
6411 	 * This is being careful not to overflow 32bit loff_t unless the
6412 	 * last entry requires it because doing so has broken 32bit apps
6413 	 * in the past.
6414 	 */
6415 	if (ctx->pos >= INT_MAX)
6416 		ctx->pos = LLONG_MAX;
6417 	else
6418 		ctx->pos = INT_MAX;
6419 nopos:
6420 	ret = 0;
6421 err:
6422 	if (put)
6423 		btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
6424 	return ret;
6425 }
6426 
6427 /*
6428  * This is somewhat expensive, updating the tree every time the
6429  * inode changes.  But, it is most likely to find the inode in cache.
6430  * FIXME, needs more benchmarking...there are no reasons other than performance
6431  * to keep or drop this code.
6432  */
btrfs_dirty_inode(struct btrfs_inode * inode)6433 static int btrfs_dirty_inode(struct btrfs_inode *inode)
6434 {
6435 	struct btrfs_root *root = inode->root;
6436 	struct btrfs_fs_info *fs_info = root->fs_info;
6437 	struct btrfs_trans_handle *trans;
6438 	int ret;
6439 
6440 	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
6441 		return 0;
6442 
6443 	trans = btrfs_join_transaction(root);
6444 	if (IS_ERR(trans))
6445 		return PTR_ERR(trans);
6446 
6447 	ret = btrfs_update_inode(trans, inode);
6448 	if (ret == -ENOSPC || ret == -EDQUOT) {
6449 		/* whoops, lets try again with the full transaction */
6450 		btrfs_end_transaction(trans);
6451 		trans = btrfs_start_transaction(root, 1);
6452 		if (IS_ERR(trans))
6453 			return PTR_ERR(trans);
6454 
6455 		ret = btrfs_update_inode(trans, inode);
6456 	}
6457 	btrfs_end_transaction(trans);
6458 	if (inode->delayed_node)
6459 		btrfs_balance_delayed_items(fs_info);
6460 
6461 	return ret;
6462 }
6463 
6464 /*
6465  * We need our own ->update_time so that we can return error on ENOSPC for
6466  * updating the inode in the case of file write and mmap writes.
6467  */
btrfs_update_time(struct inode * inode,enum fs_update_time type,unsigned int flags)6468 static int btrfs_update_time(struct inode *inode, enum fs_update_time type,
6469 		unsigned int flags)
6470 {
6471 	struct btrfs_root *root = BTRFS_I(inode)->root;
6472 	int dirty;
6473 
6474 	if (btrfs_root_readonly(root))
6475 		return -EROFS;
6476 	if (flags & IOCB_NOWAIT)
6477 		return -EAGAIN;
6478 
6479 	dirty = inode_update_time(inode, type, flags);
6480 	if (dirty <= 0)
6481 		return dirty;
6482 	return btrfs_dirty_inode(BTRFS_I(inode));
6483 }
6484 
6485 /*
6486  * helper to find a free sequence number in a given directory.  This current
6487  * code is very simple, later versions will do smarter things in the btree
6488  */
btrfs_set_inode_index(struct btrfs_inode * dir,u64 * index)6489 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6490 {
6491 	int ret = 0;
6492 
6493 	if (dir->index_cnt == (u64)-1) {
6494 		ret = btrfs_inode_delayed_dir_index_count(dir);
6495 		if (ret) {
6496 			ret = btrfs_set_inode_index_count(dir);
6497 			if (ret)
6498 				return ret;
6499 		}
6500 	}
6501 
6502 	*index = dir->index_cnt;
6503 	dir->index_cnt++;
6504 
6505 	return ret;
6506 }
6507 
btrfs_insert_inode_locked(struct inode * inode)6508 static int btrfs_insert_inode_locked(struct inode *inode)
6509 {
6510 	struct btrfs_iget_args args;
6511 
6512 	args.ino = btrfs_ino(BTRFS_I(inode));
6513 	args.root = BTRFS_I(inode)->root;
6514 
6515 	return insert_inode_locked4(inode,
6516 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6517 		   btrfs_find_actor, &args);
6518 }
6519 
btrfs_new_inode_prepare(struct btrfs_new_inode_args * args,unsigned int * trans_num_items)6520 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6521 			    unsigned int *trans_num_items)
6522 {
6523 	struct inode *dir = args->dir;
6524 	struct inode *inode = args->inode;
6525 	int ret;
6526 
6527 	if (!args->orphan) {
6528 		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
6529 					     &args->fname);
6530 		if (ret)
6531 			return ret;
6532 	}
6533 
6534 	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6535 	if (ret) {
6536 		fscrypt_free_filename(&args->fname);
6537 		return ret;
6538 	}
6539 
6540 	/* 1 to add inode item */
6541 	*trans_num_items = 1;
6542 	/* 1 to add compression property */
6543 	if (BTRFS_I(dir)->prop_compress)
6544 		(*trans_num_items)++;
6545 	/* 1 to add default ACL xattr */
6546 	if (args->default_acl)
6547 		(*trans_num_items)++;
6548 	/* 1 to add access ACL xattr */
6549 	if (args->acl)
6550 		(*trans_num_items)++;
6551 #ifdef CONFIG_SECURITY
6552 	/* 1 to add LSM xattr */
6553 	if (dir->i_security)
6554 		(*trans_num_items)++;
6555 #endif
6556 	if (args->orphan) {
6557 		/* 1 to add orphan item */
6558 		(*trans_num_items)++;
6559 	} else {
6560 		/*
6561 		 * 1 to add dir item
6562 		 * 1 to add dir index
6563 		 * 1 to update parent inode item
6564 		 *
6565 		 * No need for 1 unit for the inode ref item because it is
6566 		 * inserted in a batch together with the inode item at
6567 		 * btrfs_create_new_inode().
6568 		 */
6569 		*trans_num_items += 3;
6570 	}
6571 	return 0;
6572 }
6573 
btrfs_new_inode_args_destroy(struct btrfs_new_inode_args * args)6574 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6575 {
6576 	posix_acl_release(args->acl);
6577 	posix_acl_release(args->default_acl);
6578 	fscrypt_free_filename(&args->fname);
6579 }
6580 
6581 /*
6582  * Inherit flags from the parent inode.
6583  *
6584  * Currently only the compression flags and the cow flags are inherited.
6585  */
btrfs_inherit_iflags(struct btrfs_inode * inode,struct btrfs_inode * dir)6586 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
6587 {
6588 	unsigned int flags;
6589 
6590 	flags = dir->flags;
6591 
6592 	if (flags & BTRFS_INODE_NOCOMPRESS) {
6593 		inode->flags &= ~BTRFS_INODE_COMPRESS;
6594 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
6595 	} else if (flags & BTRFS_INODE_COMPRESS) {
6596 		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
6597 		inode->flags |= BTRFS_INODE_COMPRESS;
6598 	}
6599 
6600 	if (flags & BTRFS_INODE_NODATACOW) {
6601 		inode->flags |= BTRFS_INODE_NODATACOW;
6602 		if (S_ISREG(inode->vfs_inode.i_mode))
6603 			inode->flags |= BTRFS_INODE_NODATASUM;
6604 	}
6605 
6606 	btrfs_sync_inode_flags_to_i_flags(inode);
6607 }
6608 
btrfs_create_new_inode(struct btrfs_trans_handle * trans,struct btrfs_new_inode_args * args)6609 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6610 			   struct btrfs_new_inode_args *args)
6611 {
6612 	struct timespec64 ts;
6613 	struct inode *dir = args->dir;
6614 	struct inode *inode = args->inode;
6615 	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6616 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6617 	struct btrfs_root *root;
6618 	struct btrfs_inode_item *inode_item;
6619 	struct btrfs_path *path;
6620 	u64 objectid;
6621 	struct btrfs_inode_ref *ref;
6622 	struct btrfs_key key[2];
6623 	u32 sizes[2];
6624 	struct btrfs_item_batch batch;
6625 	unsigned long ptr;
6626 	int ret;
6627 	bool xa_reserved = false;
6628 
6629 	if (!args->orphan && !args->subvol) {
6630 		/*
6631 		 * Before anything else, check if we can add the name to the
6632 		 * parent directory. We want to avoid a dir item overflow in
6633 		 * case we have an existing dir item due to existing name
6634 		 * hash collisions. We do this check here before we call
6635 		 * btrfs_add_link() down below so that we can avoid a
6636 		 * transaction abort (which could be exploited by malicious
6637 		 * users).
6638 		 *
6639 		 * For subvolumes we already do this in btrfs_mksubvol().
6640 		 */
6641 		ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
6642 						     btrfs_ino(BTRFS_I(dir)),
6643 						     name);
6644 		if (ret < 0)
6645 			return ret;
6646 	}
6647 
6648 	path = btrfs_alloc_path();
6649 	if (!path)
6650 		return -ENOMEM;
6651 
6652 	if (!args->subvol)
6653 		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6654 	root = BTRFS_I(inode)->root;
6655 
6656 	ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
6657 	if (ret)
6658 		goto out;
6659 
6660 	ret = btrfs_get_free_objectid(root, &objectid);
6661 	if (ret)
6662 		goto out;
6663 	btrfs_set_inode_number(BTRFS_I(inode), objectid);
6664 
6665 	ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
6666 	if (ret)
6667 		goto out;
6668 	xa_reserved = true;
6669 
6670 	if (args->orphan) {
6671 		/*
6672 		 * O_TMPFILE, set link count to 0, so that after this point, we
6673 		 * fill in an inode item with the correct link count.
6674 		 */
6675 		set_nlink(inode, 0);
6676 	} else {
6677 		trace_btrfs_inode_request(dir);
6678 
6679 		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6680 		if (ret)
6681 			goto out;
6682 	}
6683 
6684 	if (S_ISDIR(inode->i_mode))
6685 		BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6686 
6687 	BTRFS_I(inode)->generation = trans->transid;
6688 	inode->i_generation = BTRFS_I(inode)->generation;
6689 
6690 	/*
6691 	 * We don't have any capability xattrs set here yet, shortcut any
6692 	 * queries for the xattrs here.  If we add them later via the inode
6693 	 * security init path or any other path this flag will be cleared.
6694 	 */
6695 	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
6696 
6697 	/*
6698 	 * Subvolumes don't inherit flags from their parent directory.
6699 	 * Originally this was probably by accident, but we probably can't
6700 	 * change it now without compatibility issues.
6701 	 */
6702 	if (!args->subvol)
6703 		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6704 
6705 	btrfs_set_inode_mapping_order(BTRFS_I(inode));
6706 	if (S_ISREG(inode->i_mode)) {
6707 		if (btrfs_test_opt(fs_info, NODATASUM))
6708 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6709 		if (btrfs_test_opt(fs_info, NODATACOW))
6710 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6711 				BTRFS_INODE_NODATASUM;
6712 		btrfs_update_inode_mapping_flags(BTRFS_I(inode));
6713 	}
6714 
6715 	ret = btrfs_insert_inode_locked(inode);
6716 	if (ret < 0) {
6717 		if (!args->orphan)
6718 			BTRFS_I(dir)->index_cnt--;
6719 		goto out;
6720 	}
6721 
6722 	/*
6723 	 * We could have gotten an inode number from somebody who was fsynced
6724 	 * and then removed in this same transaction, so let's just set full
6725 	 * sync since it will be a full sync anyway and this will blow away the
6726 	 * old info in the log.
6727 	 */
6728 	btrfs_set_inode_full_sync(BTRFS_I(inode));
6729 
6730 	key[0].objectid = objectid;
6731 	key[0].type = BTRFS_INODE_ITEM_KEY;
6732 	key[0].offset = 0;
6733 
6734 	sizes[0] = sizeof(struct btrfs_inode_item);
6735 
6736 	if (!args->orphan) {
6737 		/*
6738 		 * Start new inodes with an inode_ref. This is slightly more
6739 		 * efficient for small numbers of hard links since they will
6740 		 * be packed into one item. Extended refs will kick in if we
6741 		 * add more hard links than can fit in the ref item.
6742 		 */
6743 		key[1].objectid = objectid;
6744 		key[1].type = BTRFS_INODE_REF_KEY;
6745 		if (args->subvol) {
6746 			key[1].offset = objectid;
6747 			sizes[1] = 2 + sizeof(*ref);
6748 		} else {
6749 			key[1].offset = btrfs_ino(BTRFS_I(dir));
6750 			sizes[1] = name->len + sizeof(*ref);
6751 		}
6752 	}
6753 
6754 	batch.keys = &key[0];
6755 	batch.data_sizes = &sizes[0];
6756 	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6757 	batch.nr = args->orphan ? 1 : 2;
6758 	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6759 	if (unlikely(ret != 0)) {
6760 		btrfs_abort_transaction(trans, ret);
6761 		goto discard;
6762 	}
6763 
6764 	ts = simple_inode_init_ts(inode);
6765 	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
6766 	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
6767 
6768 	/*
6769 	 * We're going to fill the inode item now, so at this point the inode
6770 	 * must be fully initialized.
6771 	 */
6772 
6773 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6774 				  struct btrfs_inode_item);
6775 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6776 			     sizeof(*inode_item));
6777 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6778 
6779 	if (!args->orphan) {
6780 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6781 				     struct btrfs_inode_ref);
6782 		ptr = (unsigned long)(ref + 1);
6783 		if (args->subvol) {
6784 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6785 			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6786 			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6787 		} else {
6788 			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6789 						     name->len);
6790 			btrfs_set_inode_ref_index(path->nodes[0], ref,
6791 						  BTRFS_I(inode)->dir_index);
6792 			write_extent_buffer(path->nodes[0], name->name, ptr,
6793 					    name->len);
6794 		}
6795 	}
6796 
6797 	/*
6798 	 * We don't need the path anymore, plus inheriting properties, adding
6799 	 * ACLs, security xattrs, orphan item or adding the link, will result in
6800 	 * allocating yet another path. So just free our path.
6801 	 */
6802 	btrfs_free_path(path);
6803 	path = NULL;
6804 
6805 	if (args->subvol) {
6806 		struct btrfs_inode *parent;
6807 
6808 		/*
6809 		 * Subvolumes inherit properties from their parent subvolume,
6810 		 * not the directory they were created in.
6811 		 */
6812 		parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
6813 		if (IS_ERR(parent)) {
6814 			ret = PTR_ERR(parent);
6815 		} else {
6816 			ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
6817 							parent);
6818 			iput(&parent->vfs_inode);
6819 		}
6820 	} else {
6821 		ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
6822 						BTRFS_I(dir));
6823 	}
6824 	if (ret) {
6825 		btrfs_err(fs_info,
6826 			  "error inheriting props for ino %llu (root %llu): %d",
6827 			  btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
6828 	}
6829 
6830 	/*
6831 	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6832 	 * probably a bug.
6833 	 */
6834 	if (!args->subvol) {
6835 		ret = btrfs_init_inode_security(trans, args);
6836 		if (unlikely(ret)) {
6837 			btrfs_abort_transaction(trans, ret);
6838 			goto discard;
6839 		}
6840 	}
6841 
6842 	ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
6843 	if (WARN_ON(ret)) {
6844 		/* Shouldn't happen, we used xa_reserve() before. */
6845 		btrfs_abort_transaction(trans, ret);
6846 		goto discard;
6847 	}
6848 
6849 	trace_btrfs_inode_new(inode);
6850 	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6851 
6852 	btrfs_update_root_times(trans, root);
6853 
6854 	if (args->orphan) {
6855 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6856 		if (unlikely(ret)) {
6857 			btrfs_abort_transaction(trans, ret);
6858 			goto discard;
6859 		}
6860 	} else {
6861 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6862 				     0, BTRFS_I(inode)->dir_index);
6863 		if (unlikely(ret)) {
6864 			btrfs_abort_transaction(trans, ret);
6865 			goto discard;
6866 		}
6867 	}
6868 
6869 	return 0;
6870 
6871 discard:
6872 	/*
6873 	 * discard_new_inode() calls iput(), but the caller owns the reference
6874 	 * to the inode.
6875 	 */
6876 	ihold(inode);
6877 	discard_new_inode(inode);
6878 out:
6879 	if (xa_reserved)
6880 		xa_release(&root->inodes, objectid);
6881 
6882 	btrfs_free_path(path);
6883 	return ret;
6884 }
6885 
6886 /*
6887  * utility function to add 'inode' into 'parent_inode' with
6888  * a give name and a given sequence number.
6889  * if 'add_backref' is true, also insert a backref from the
6890  * inode to the parent directory.
6891  */
btrfs_add_link(struct btrfs_trans_handle * trans,struct btrfs_inode * parent_inode,struct btrfs_inode * inode,const struct fscrypt_str * name,bool add_backref,u64 index)6892 int btrfs_add_link(struct btrfs_trans_handle *trans,
6893 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6894 		   const struct fscrypt_str *name, bool add_backref, u64 index)
6895 {
6896 	int ret = 0;
6897 	struct btrfs_key key;
6898 	struct btrfs_root *root = parent_inode->root;
6899 	u64 ino = btrfs_ino(inode);
6900 	u64 parent_ino = btrfs_ino(parent_inode);
6901 
6902 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6903 		memcpy(&key, &inode->root->root_key, sizeof(key));
6904 	} else {
6905 		key.objectid = ino;
6906 		key.type = BTRFS_INODE_ITEM_KEY;
6907 		key.offset = 0;
6908 	}
6909 
6910 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6911 		ret = btrfs_add_root_ref(trans, key.objectid,
6912 					 btrfs_root_id(root), parent_ino,
6913 					 index, name);
6914 	} else if (add_backref) {
6915 		ret = btrfs_insert_inode_ref(trans, root, name,
6916 					     ino, parent_ino, index);
6917 	}
6918 
6919 	/* Nothing to clean up yet */
6920 	if (ret)
6921 		return ret;
6922 
6923 	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
6924 				    btrfs_inode_type(inode), index);
6925 	if (ret == -EEXIST || ret == -EOVERFLOW)
6926 		goto fail_dir_item;
6927 	else if (unlikely(ret)) {
6928 		btrfs_abort_transaction(trans, ret);
6929 		return ret;
6930 	}
6931 
6932 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6933 			   name->len * 2);
6934 	inode_inc_iversion(&parent_inode->vfs_inode);
6935 	update_time_after_link_or_unlink(parent_inode);
6936 
6937 	ret = btrfs_update_inode(trans, parent_inode);
6938 	if (ret)
6939 		btrfs_abort_transaction(trans, ret);
6940 	return ret;
6941 
6942 fail_dir_item:
6943 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6944 		u64 local_index;
6945 		int ret2;
6946 
6947 		ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root),
6948 					  parent_ino, &local_index, name);
6949 		if (ret2)
6950 			btrfs_abort_transaction(trans, ret2);
6951 	} else if (add_backref) {
6952 		int ret2;
6953 
6954 		ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL);
6955 		if (ret2)
6956 			btrfs_abort_transaction(trans, ret2);
6957 	}
6958 
6959 	/* Return the original error code */
6960 	return ret;
6961 }
6962 
btrfs_create_common(struct inode * dir,struct dentry * dentry,struct inode * inode)6963 static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6964 			       struct inode *inode)
6965 {
6966 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6967 	struct btrfs_root *root = BTRFS_I(dir)->root;
6968 	struct btrfs_new_inode_args new_inode_args = {
6969 		.dir = dir,
6970 		.dentry = dentry,
6971 		.inode = inode,
6972 	};
6973 	unsigned int trans_num_items;
6974 	struct btrfs_trans_handle *trans;
6975 	int ret;
6976 
6977 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6978 	if (ret)
6979 		goto out_inode;
6980 
6981 	trans = btrfs_start_transaction(root, trans_num_items);
6982 	if (IS_ERR(trans)) {
6983 		ret = PTR_ERR(trans);
6984 		goto out_new_inode_args;
6985 	}
6986 
6987 	ret = btrfs_create_new_inode(trans, &new_inode_args);
6988 	if (!ret) {
6989 		if (S_ISDIR(inode->i_mode))
6990 			inode->i_opflags |= IOP_FASTPERM_MAY_EXEC;
6991 		d_instantiate_new(dentry, inode);
6992 	}
6993 
6994 	btrfs_end_transaction(trans);
6995 	btrfs_btree_balance_dirty(fs_info);
6996 out_new_inode_args:
6997 	btrfs_new_inode_args_destroy(&new_inode_args);
6998 out_inode:
6999 	if (ret)
7000 		iput(inode);
7001 	return ret;
7002 }
7003 
btrfs_mknod(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode,dev_t rdev)7004 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
7005 		       struct dentry *dentry, umode_t mode, dev_t rdev)
7006 {
7007 	struct inode *inode;
7008 
7009 	inode = new_inode(dir->i_sb);
7010 	if (!inode)
7011 		return -ENOMEM;
7012 	inode_init_owner(idmap, inode, dir, mode);
7013 	inode->i_op = &btrfs_special_inode_operations;
7014 	init_special_inode(inode, inode->i_mode, rdev);
7015 	return btrfs_create_common(dir, dentry, inode);
7016 }
7017 
btrfs_create(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode,bool excl)7018 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
7019 			struct dentry *dentry, umode_t mode, bool excl)
7020 {
7021 	struct inode *inode;
7022 
7023 	inode = new_inode(dir->i_sb);
7024 	if (!inode)
7025 		return -ENOMEM;
7026 	inode_init_owner(idmap, inode, dir, mode);
7027 	inode->i_fop = &btrfs_file_operations;
7028 	inode->i_op = &btrfs_file_inode_operations;
7029 	inode->i_mapping->a_ops = &btrfs_aops;
7030 	return btrfs_create_common(dir, dentry, inode);
7031 }
7032 
btrfs_link(struct dentry * old_dentry,struct inode * dir,struct dentry * dentry)7033 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
7034 		      struct dentry *dentry)
7035 {
7036 	struct btrfs_trans_handle *trans = NULL;
7037 	struct btrfs_root *root = BTRFS_I(dir)->root;
7038 	struct inode *inode = d_inode(old_dentry);
7039 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7040 	struct fscrypt_name fname;
7041 	u64 index;
7042 	int ret;
7043 
7044 	/* do not allow sys_link's with other subvols of the same device */
7045 	if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
7046 		return -EXDEV;
7047 
7048 	if (inode->i_nlink >= BTRFS_LINK_MAX)
7049 		return -EMLINK;
7050 
7051 	ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
7052 	if (ret)
7053 		goto fail;
7054 
7055 	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
7056 	if (ret)
7057 		goto fail;
7058 
7059 	/*
7060 	 * 2 items for inode and inode ref
7061 	 * 2 items for dir items
7062 	 * 1 item for parent inode
7063 	 * 1 item for orphan item deletion if O_TMPFILE
7064 	 */
7065 	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
7066 	if (IS_ERR(trans)) {
7067 		ret = PTR_ERR(trans);
7068 		trans = NULL;
7069 		goto fail;
7070 	}
7071 
7072 	/* There are several dir indexes for this inode, clear the cache. */
7073 	BTRFS_I(inode)->dir_index = 0ULL;
7074 	inode_inc_iversion(inode);
7075 	inode_set_ctime_current(inode);
7076 
7077 	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
7078 			     &fname.disk_name, 1, index);
7079 	if (ret)
7080 		goto fail;
7081 
7082 	/* Link added now we update the inode item with the new link count. */
7083 	inc_nlink(inode);
7084 	ret = btrfs_update_inode(trans, BTRFS_I(inode));
7085 	if (unlikely(ret)) {
7086 		btrfs_abort_transaction(trans, ret);
7087 		goto fail;
7088 	}
7089 
7090 	if (inode->i_nlink == 1) {
7091 		/*
7092 		 * If the new hard link count is 1, it's a file created with the
7093 		 * open(2) O_TMPFILE flag.
7094 		 */
7095 		ret = btrfs_orphan_del(trans, BTRFS_I(inode));
7096 		if (unlikely(ret)) {
7097 			btrfs_abort_transaction(trans, ret);
7098 			goto fail;
7099 		}
7100 	}
7101 
7102 	/* Grab reference for the new dentry passed to d_instantiate(). */
7103 	ihold(inode);
7104 	d_instantiate(dentry, inode);
7105 	btrfs_log_new_name(trans, old_dentry, NULL, 0, dentry->d_parent);
7106 
7107 fail:
7108 	fscrypt_free_filename(&fname);
7109 	if (trans)
7110 		btrfs_end_transaction(trans);
7111 	btrfs_btree_balance_dirty(fs_info);
7112 	return ret;
7113 }
7114 
btrfs_mkdir(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode)7115 static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
7116 				  struct dentry *dentry, umode_t mode)
7117 {
7118 	struct inode *inode;
7119 
7120 	inode = new_inode(dir->i_sb);
7121 	if (!inode)
7122 		return ERR_PTR(-ENOMEM);
7123 	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
7124 	inode->i_op = &btrfs_dir_inode_operations;
7125 	inode->i_fop = &btrfs_dir_file_operations;
7126 	return ERR_PTR(btrfs_create_common(dir, dentry, inode));
7127 }
7128 
uncompress_inline(struct btrfs_path * path,struct folio * folio,struct btrfs_file_extent_item * item)7129 static noinline int uncompress_inline(struct btrfs_path *path,
7130 				      struct folio *folio,
7131 				      struct btrfs_file_extent_item *item)
7132 {
7133 	int ret;
7134 	struct extent_buffer *leaf = path->nodes[0];
7135 	const u32 blocksize = leaf->fs_info->sectorsize;
7136 	char *tmp;
7137 	size_t max_size;
7138 	unsigned long inline_size;
7139 	unsigned long ptr;
7140 	int compress_type;
7141 
7142 	compress_type = btrfs_file_extent_compression(leaf, item);
7143 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
7144 	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
7145 	tmp = kmalloc(inline_size, GFP_NOFS);
7146 	if (!tmp)
7147 		return -ENOMEM;
7148 	ptr = btrfs_file_extent_inline_start(item);
7149 
7150 	read_extent_buffer(leaf, tmp, ptr, inline_size);
7151 
7152 	max_size = min_t(unsigned long, blocksize, max_size);
7153 	ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
7154 			       max_size);
7155 
7156 	/*
7157 	 * decompression code contains a memset to fill in any space between the end
7158 	 * of the uncompressed data and the end of max_size in case the decompressed
7159 	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
7160 	 * the end of an inline extent and the beginning of the next block, so we
7161 	 * cover that region here.
7162 	 */
7163 
7164 	if (max_size < blocksize)
7165 		folio_zero_range(folio, max_size, blocksize - max_size);
7166 	kfree(tmp);
7167 	return ret;
7168 }
7169 
read_inline_extent(struct btrfs_path * path,struct folio * folio)7170 static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
7171 {
7172 	const u32 blocksize = path->nodes[0]->fs_info->sectorsize;
7173 	struct btrfs_file_extent_item *fi;
7174 	void *kaddr;
7175 	size_t copy_size;
7176 
7177 	if (!folio || folio_test_uptodate(folio))
7178 		return 0;
7179 
7180 	ASSERT(folio_pos(folio) == 0);
7181 
7182 	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
7183 			    struct btrfs_file_extent_item);
7184 	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
7185 		return uncompress_inline(path, folio, fi);
7186 
7187 	copy_size = min_t(u64, blocksize,
7188 			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
7189 	kaddr = kmap_local_folio(folio, 0);
7190 	read_extent_buffer(path->nodes[0], kaddr,
7191 			   btrfs_file_extent_inline_start(fi), copy_size);
7192 	kunmap_local(kaddr);
7193 	if (copy_size < blocksize)
7194 		folio_zero_range(folio, copy_size, blocksize - copy_size);
7195 	return 0;
7196 }
7197 
7198 /*
7199  * Lookup the first extent overlapping a range in a file.
7200  *
7201  * @inode:	file to search in
7202  * @page:	page to read extent data into if the extent is inline
7203  * @start:	file offset
7204  * @len:	length of range starting at @start
7205  *
7206  * Return the first &struct extent_map which overlaps the given range, reading
7207  * it from the B-tree and caching it if necessary. Note that there may be more
7208  * extents which overlap the given range after the returned extent_map.
7209  *
7210  * If @page is not NULL and the extent is inline, this also reads the extent
7211  * data directly into the page and marks the extent up to date in the io_tree.
7212  *
7213  * Return: ERR_PTR on error, non-NULL extent_map on success.
7214  */
btrfs_get_extent(struct btrfs_inode * inode,struct folio * folio,u64 start,u64 len)7215 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
7216 				    struct folio *folio, u64 start, u64 len)
7217 {
7218 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
7219 	int ret = 0;
7220 	u64 extent_start = 0;
7221 	u64 extent_end = 0;
7222 	u64 objectid = btrfs_ino(inode);
7223 	int extent_type = -1;
7224 	struct btrfs_path *path = NULL;
7225 	struct btrfs_root *root = inode->root;
7226 	struct btrfs_file_extent_item *item;
7227 	struct extent_buffer *leaf;
7228 	struct btrfs_key found_key;
7229 	struct extent_map *em = NULL;
7230 	struct extent_map_tree *em_tree = &inode->extent_tree;
7231 
7232 	read_lock(&em_tree->lock);
7233 	em = btrfs_lookup_extent_mapping(em_tree, start, len);
7234 	read_unlock(&em_tree->lock);
7235 
7236 	if (em) {
7237 		if (em->start > start || btrfs_extent_map_end(em) <= start)
7238 			btrfs_free_extent_map(em);
7239 		else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
7240 			btrfs_free_extent_map(em);
7241 		else
7242 			goto out;
7243 	}
7244 	em = btrfs_alloc_extent_map();
7245 	if (!em) {
7246 		ret = -ENOMEM;
7247 		goto out;
7248 	}
7249 	em->start = EXTENT_MAP_HOLE;
7250 	em->disk_bytenr = EXTENT_MAP_HOLE;
7251 	em->len = (u64)-1;
7252 
7253 	path = btrfs_alloc_path();
7254 	if (!path) {
7255 		ret = -ENOMEM;
7256 		goto out;
7257 	}
7258 
7259 	/* Chances are we'll be called again, so go ahead and do readahead */
7260 	path->reada = READA_FORWARD;
7261 
7262 	/*
7263 	 * The same explanation in load_free_space_cache applies here as well,
7264 	 * we only read when we're loading the free space cache, and at that
7265 	 * point the commit_root has everything we need.
7266 	 */
7267 	if (btrfs_is_free_space_inode(inode)) {
7268 		path->search_commit_root = true;
7269 		path->skip_locking = true;
7270 	}
7271 
7272 	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
7273 	if (ret < 0) {
7274 		goto out;
7275 	} else if (ret > 0) {
7276 		if (path->slots[0] == 0)
7277 			goto not_found;
7278 		path->slots[0]--;
7279 		ret = 0;
7280 	}
7281 
7282 	leaf = path->nodes[0];
7283 	item = btrfs_item_ptr(leaf, path->slots[0],
7284 			      struct btrfs_file_extent_item);
7285 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7286 	if (found_key.objectid != objectid ||
7287 	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
7288 		/*
7289 		 * If we backup past the first extent we want to move forward
7290 		 * and see if there is an extent in front of us, otherwise we'll
7291 		 * say there is a hole for our whole search range which can
7292 		 * cause problems.
7293 		 */
7294 		extent_end = start;
7295 		goto next;
7296 	}
7297 
7298 	extent_type = btrfs_file_extent_type(leaf, item);
7299 	extent_start = found_key.offset;
7300 	extent_end = btrfs_file_extent_end(path);
7301 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
7302 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7303 		/* Only regular file could have regular/prealloc extent */
7304 		if (unlikely(!S_ISREG(inode->vfs_inode.i_mode))) {
7305 			ret = -EUCLEAN;
7306 			btrfs_crit(fs_info,
7307 		"regular/prealloc extent found for non-regular inode %llu",
7308 				   btrfs_ino(inode));
7309 			goto out;
7310 		}
7311 		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
7312 						       extent_start);
7313 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7314 		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
7315 						      path->slots[0],
7316 						      extent_start);
7317 	}
7318 next:
7319 	if (start >= extent_end) {
7320 		path->slots[0]++;
7321 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
7322 			ret = btrfs_next_leaf(root, path);
7323 			if (ret < 0)
7324 				goto out;
7325 			else if (ret > 0)
7326 				goto not_found;
7327 
7328 			leaf = path->nodes[0];
7329 		}
7330 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7331 		if (found_key.objectid != objectid ||
7332 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
7333 			goto not_found;
7334 		if (start + len <= found_key.offset)
7335 			goto not_found;
7336 		if (start > found_key.offset)
7337 			goto next;
7338 
7339 		/* New extent overlaps with existing one */
7340 		em->start = start;
7341 		em->len = found_key.offset - start;
7342 		em->disk_bytenr = EXTENT_MAP_HOLE;
7343 		goto insert;
7344 	}
7345 
7346 	btrfs_extent_item_to_extent_map(inode, path, item, em);
7347 
7348 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
7349 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7350 		goto insert;
7351 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7352 		/*
7353 		 * Inline extent can only exist at file offset 0. This is
7354 		 * ensured by tree-checker and inline extent creation path.
7355 		 * Thus all members representing file offsets should be zero.
7356 		 */
7357 		ASSERT(extent_start == 0);
7358 		ASSERT(em->start == 0);
7359 
7360 		/*
7361 		 * btrfs_extent_item_to_extent_map() should have properly
7362 		 * initialized em members already.
7363 		 *
7364 		 * Other members are not utilized for inline extents.
7365 		 */
7366 		ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
7367 		ASSERT(em->len == fs_info->sectorsize);
7368 
7369 		ret = read_inline_extent(path, folio);
7370 		if (ret < 0)
7371 			goto out;
7372 		goto insert;
7373 	}
7374 not_found:
7375 	em->start = start;
7376 	em->len = len;
7377 	em->disk_bytenr = EXTENT_MAP_HOLE;
7378 insert:
7379 	ret = 0;
7380 	btrfs_release_path(path);
7381 	if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) {
7382 		btrfs_err(fs_info,
7383 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
7384 			  em->start, em->len, start, len);
7385 		ret = -EIO;
7386 		goto out;
7387 	}
7388 
7389 	write_lock(&em_tree->lock);
7390 	ret = btrfs_add_extent_mapping(inode, &em, start, len);
7391 	write_unlock(&em_tree->lock);
7392 out:
7393 	btrfs_free_path(path);
7394 
7395 	trace_btrfs_get_extent(root, inode, em);
7396 
7397 	if (ret) {
7398 		btrfs_free_extent_map(em);
7399 		return ERR_PTR(ret);
7400 	}
7401 	return em;
7402 }
7403 
btrfs_extent_readonly(struct btrfs_fs_info * fs_info,u64 bytenr)7404 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7405 {
7406 	struct btrfs_block_group *block_group;
7407 	bool readonly = false;
7408 
7409 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
7410 	if (!block_group || block_group->ro)
7411 		readonly = true;
7412 	if (block_group)
7413 		btrfs_put_block_group(block_group);
7414 	return readonly;
7415 }
7416 
7417 /*
7418  * Check if we can do nocow write into the range [@offset, @offset + @len)
7419  *
7420  * @offset:	File offset
7421  * @len:	The length to write, will be updated to the nocow writeable
7422  *		range
7423  * @orig_start:	(optional) Return the original file offset of the file extent
7424  * @orig_len:	(optional) Return the original on-disk length of the file extent
7425  * @ram_bytes:	(optional) Return the ram_bytes of the file extent
7426  *
7427  * Return:
7428  * >0	and update @len if we can do nocow write
7429  *  0	if we can't do nocow write
7430  * <0	if error happened
7431  *
7432  * NOTE: This only checks the file extents, caller is responsible to wait for
7433  *	 any ordered extents.
7434  */
can_nocow_extent(struct btrfs_inode * inode,u64 offset,u64 * len,struct btrfs_file_extent * file_extent,bool nowait)7435 noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
7436 			      struct btrfs_file_extent *file_extent,
7437 			      bool nowait)
7438 {
7439 	struct btrfs_root *root = inode->root;
7440 	struct btrfs_fs_info *fs_info = root->fs_info;
7441 	struct can_nocow_file_extent_args nocow_args = { 0 };
7442 	BTRFS_PATH_AUTO_FREE(path);
7443 	int ret;
7444 	struct extent_buffer *leaf;
7445 	struct extent_io_tree *io_tree = &inode->io_tree;
7446 	struct btrfs_file_extent_item *fi;
7447 	struct btrfs_key key;
7448 	int found_type;
7449 
7450 	path = btrfs_alloc_path();
7451 	if (!path)
7452 		return -ENOMEM;
7453 	path->nowait = nowait;
7454 
7455 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7456 				       offset, 0);
7457 	if (ret < 0)
7458 		return ret;
7459 
7460 	if (ret == 1) {
7461 		if (path->slots[0] == 0) {
7462 			/* Can't find the item, must COW. */
7463 			return 0;
7464 		}
7465 		path->slots[0]--;
7466 	}
7467 	ret = 0;
7468 	leaf = path->nodes[0];
7469 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7470 	if (key.objectid != btrfs_ino(inode) ||
7471 	    key.type != BTRFS_EXTENT_DATA_KEY) {
7472 		/* Not our file or wrong item type, must COW. */
7473 		return 0;
7474 	}
7475 
7476 	if (key.offset > offset) {
7477 		/* Wrong offset, must COW. */
7478 		return 0;
7479 	}
7480 
7481 	if (btrfs_file_extent_end(path) <= offset)
7482 		return 0;
7483 
7484 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7485 	found_type = btrfs_file_extent_type(leaf, fi);
7486 
7487 	nocow_args.start = offset;
7488 	nocow_args.end = offset + *len - 1;
7489 	nocow_args.free_path = true;
7490 
7491 	ret = can_nocow_file_extent(path, &key, inode, &nocow_args);
7492 	/* can_nocow_file_extent() has freed the path. */
7493 	path = NULL;
7494 
7495 	if (ret != 1) {
7496 		/* Treat errors as not being able to NOCOW. */
7497 		return 0;
7498 	}
7499 
7500 	if (btrfs_extent_readonly(fs_info,
7501 				  nocow_args.file_extent.disk_bytenr +
7502 				  nocow_args.file_extent.offset))
7503 		return 0;
7504 
7505 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
7506 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7507 		u64 range_end;
7508 
7509 		range_end = round_up(offset + nocow_args.file_extent.num_bytes,
7510 				     root->fs_info->sectorsize) - 1;
7511 		ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
7512 						  EXTENT_DELALLOC);
7513 		if (ret)
7514 			return -EAGAIN;
7515 	}
7516 
7517 	if (file_extent)
7518 		memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
7519 
7520 	*len = nocow_args.file_extent.num_bytes;
7521 
7522 	return 1;
7523 }
7524 
7525 /* The callers of this must take lock_extent() */
btrfs_create_io_em(struct btrfs_inode * inode,u64 start,const struct btrfs_file_extent * file_extent,int type)7526 struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
7527 				      const struct btrfs_file_extent *file_extent,
7528 				      int type)
7529 {
7530 	struct extent_map *em;
7531 	int ret;
7532 
7533 	/*
7534 	 * Note the missing NOCOW type.
7535 	 *
7536 	 * For pure NOCOW writes, we should not create an io extent map, but
7537 	 * just reusing the existing one.
7538 	 * Only PREALLOC writes (NOCOW write into preallocated range) can
7539 	 * create an io extent map.
7540 	 */
7541 	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7542 	       type == BTRFS_ORDERED_COMPRESSED ||
7543 	       type == BTRFS_ORDERED_REGULAR);
7544 
7545 	switch (type) {
7546 	case BTRFS_ORDERED_PREALLOC:
7547 		/* We're only referring part of a larger preallocated extent. */
7548 		ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7549 		break;
7550 	case BTRFS_ORDERED_REGULAR:
7551 		/* COW results a new extent matching our file extent size. */
7552 		ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
7553 		ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
7554 
7555 		/* Since it's a new extent, we should not have any offset. */
7556 		ASSERT(file_extent->offset == 0);
7557 		break;
7558 	case BTRFS_ORDERED_COMPRESSED:
7559 		/* Must be compressed. */
7560 		ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
7561 
7562 		/*
7563 		 * Encoded write can make us to refer to part of the
7564 		 * uncompressed extent.
7565 		 */
7566 		ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7567 		break;
7568 	}
7569 
7570 	em = btrfs_alloc_extent_map();
7571 	if (!em)
7572 		return ERR_PTR(-ENOMEM);
7573 
7574 	em->start = start;
7575 	em->len = file_extent->num_bytes;
7576 	em->disk_bytenr = file_extent->disk_bytenr;
7577 	em->disk_num_bytes = file_extent->disk_num_bytes;
7578 	em->ram_bytes = file_extent->ram_bytes;
7579 	em->generation = -1;
7580 	em->offset = file_extent->offset;
7581 	em->flags |= EXTENT_FLAG_PINNED;
7582 	if (type == BTRFS_ORDERED_COMPRESSED)
7583 		btrfs_extent_map_set_compression(em, file_extent->compression);
7584 
7585 	ret = btrfs_replace_extent_map_range(inode, em, true);
7586 	if (ret) {
7587 		btrfs_free_extent_map(em);
7588 		return ERR_PTR(ret);
7589 	}
7590 
7591 	/* em got 2 refs now, callers needs to do btrfs_free_extent_map once. */
7592 	return em;
7593 }
7594 
7595 /*
7596  * For release_folio() and invalidate_folio() we have a race window where
7597  * folio_end_writeback() is called but the subpage spinlock is not yet released.
7598  * If we continue to release/invalidate the page, we could cause use-after-free
7599  * for subpage spinlock.  So this function is to spin and wait for subpage
7600  * spinlock.
7601  */
wait_subpage_spinlock(struct folio * folio)7602 static void wait_subpage_spinlock(struct folio *folio)
7603 {
7604 	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
7605 	struct btrfs_folio_state *bfs;
7606 
7607 	if (!btrfs_is_subpage(fs_info, folio))
7608 		return;
7609 
7610 	ASSERT(folio_test_private(folio) && folio_get_private(folio));
7611 	bfs = folio_get_private(folio);
7612 
7613 	/*
7614 	 * This may look insane as we just acquire the spinlock and release it,
7615 	 * without doing anything.  But we just want to make sure no one is
7616 	 * still holding the subpage spinlock.
7617 	 * And since the page is not dirty nor writeback, and we have page
7618 	 * locked, the only possible way to hold a spinlock is from the endio
7619 	 * function to clear page writeback.
7620 	 *
7621 	 * Here we just acquire the spinlock so that all existing callers
7622 	 * should exit and we're safe to release/invalidate the page.
7623 	 */
7624 	spin_lock_irq(&bfs->lock);
7625 	spin_unlock_irq(&bfs->lock);
7626 }
7627 
btrfs_launder_folio(struct folio * folio)7628 static int btrfs_launder_folio(struct folio *folio)
7629 {
7630 	return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
7631 				      folio_size(folio), NULL);
7632 }
7633 
__btrfs_release_folio(struct folio * folio,gfp_t gfp_flags)7634 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7635 {
7636 	if (try_release_extent_mapping(folio, gfp_flags)) {
7637 		wait_subpage_spinlock(folio);
7638 		clear_folio_extent_mapped(folio);
7639 		return true;
7640 	}
7641 	return false;
7642 }
7643 
btrfs_release_folio(struct folio * folio,gfp_t gfp_flags)7644 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7645 {
7646 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
7647 		return false;
7648 	return __btrfs_release_folio(folio, gfp_flags);
7649 }
7650 
7651 #ifdef CONFIG_MIGRATION
btrfs_migrate_folio(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode)7652 static int btrfs_migrate_folio(struct address_space *mapping,
7653 			     struct folio *dst, struct folio *src,
7654 			     enum migrate_mode mode)
7655 {
7656 	int ret = filemap_migrate_folio(mapping, dst, src, mode);
7657 
7658 	if (ret)
7659 		return ret;
7660 
7661 	if (folio_test_ordered(src)) {
7662 		folio_clear_ordered(src);
7663 		folio_set_ordered(dst);
7664 	}
7665 
7666 	return 0;
7667 }
7668 #else
7669 #define btrfs_migrate_folio NULL
7670 #endif
7671 
btrfs_invalidate_folio(struct folio * folio,size_t offset,size_t length)7672 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
7673 				 size_t length)
7674 {
7675 	struct btrfs_inode *inode = folio_to_inode(folio);
7676 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
7677 	struct extent_io_tree *tree = &inode->io_tree;
7678 	struct extent_state *cached_state = NULL;
7679 	u64 page_start = folio_pos(folio);
7680 	u64 page_end = page_start + folio_size(folio) - 1;
7681 	u64 cur;
7682 	int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING;
7683 
7684 	/*
7685 	 * We have folio locked so no new ordered extent can be created on this
7686 	 * page, nor bio can be submitted for this folio.
7687 	 *
7688 	 * But already submitted bio can still be finished on this folio.
7689 	 * Furthermore, endio function won't skip folio which has Ordered
7690 	 * already cleared, so it's possible for endio and
7691 	 * invalidate_folio to do the same ordered extent accounting twice
7692 	 * on one folio.
7693 	 *
7694 	 * So here we wait for any submitted bios to finish, so that we won't
7695 	 * do double ordered extent accounting on the same folio.
7696 	 */
7697 	folio_wait_writeback(folio);
7698 	wait_subpage_spinlock(folio);
7699 
7700 	/*
7701 	 * For subpage case, we have call sites like
7702 	 * btrfs_punch_hole_lock_range() which passes range not aligned to
7703 	 * sectorsize.
7704 	 * If the range doesn't cover the full folio, we don't need to and
7705 	 * shouldn't clear page extent mapped, as folio->private can still
7706 	 * record subpage dirty bits for other part of the range.
7707 	 *
7708 	 * For cases that invalidate the full folio even the range doesn't
7709 	 * cover the full folio, like invalidating the last folio, we're
7710 	 * still safe to wait for ordered extent to finish.
7711 	 */
7712 	if (!(offset == 0 && length == folio_size(folio))) {
7713 		btrfs_release_folio(folio, GFP_NOFS);
7714 		return;
7715 	}
7716 
7717 	if (!inode_evicting)
7718 		btrfs_lock_extent(tree, page_start, page_end, &cached_state);
7719 
7720 	cur = page_start;
7721 	while (cur < page_end) {
7722 		struct btrfs_ordered_extent *ordered;
7723 		u64 range_end;
7724 		u32 range_len;
7725 		u32 extra_flags = 0;
7726 
7727 		ordered = btrfs_lookup_first_ordered_range(inode, cur,
7728 							   page_end + 1 - cur);
7729 		if (!ordered) {
7730 			range_end = page_end;
7731 			/*
7732 			 * No ordered extent covering this range, we are safe
7733 			 * to delete all extent states in the range.
7734 			 */
7735 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7736 			goto next;
7737 		}
7738 		if (ordered->file_offset > cur) {
7739 			/*
7740 			 * There is a range between [cur, oe->file_offset) not
7741 			 * covered by any ordered extent.
7742 			 * We are safe to delete all extent states, and handle
7743 			 * the ordered extent in the next iteration.
7744 			 */
7745 			range_end = ordered->file_offset - 1;
7746 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7747 			goto next;
7748 		}
7749 
7750 		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
7751 				page_end);
7752 		ASSERT(range_end + 1 - cur < U32_MAX);
7753 		range_len = range_end + 1 - cur;
7754 		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
7755 			/*
7756 			 * If Ordered is cleared, it means endio has
7757 			 * already been executed for the range.
7758 			 * We can't delete the extent states as
7759 			 * btrfs_finish_ordered_io() may still use some of them.
7760 			 */
7761 			goto next;
7762 		}
7763 		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
7764 
7765 		/*
7766 		 * IO on this page will never be started, so we need to account
7767 		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
7768 		 * here, must leave that up for the ordered extent completion.
7769 		 *
7770 		 * This will also unlock the range for incoming
7771 		 * btrfs_finish_ordered_io().
7772 		 */
7773 		if (!inode_evicting)
7774 			btrfs_clear_extent_bit(tree, cur, range_end,
7775 					       EXTENT_DELALLOC |
7776 					       EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7777 					       EXTENT_DEFRAG, &cached_state);
7778 
7779 		spin_lock(&inode->ordered_tree_lock);
7780 		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
7781 		ordered->truncated_len = min(ordered->truncated_len,
7782 					     cur - ordered->file_offset);
7783 		spin_unlock(&inode->ordered_tree_lock);
7784 
7785 		/*
7786 		 * If the ordered extent has finished, we're safe to delete all
7787 		 * the extent states of the range, otherwise
7788 		 * btrfs_finish_ordered_io() will get executed by endio for
7789 		 * other pages, so we can't delete extent states.
7790 		 */
7791 		if (btrfs_dec_test_ordered_pending(inode, &ordered,
7792 						   cur, range_end + 1 - cur)) {
7793 			btrfs_finish_ordered_io(ordered);
7794 			/*
7795 			 * The ordered extent has finished, now we're again
7796 			 * safe to delete all extent states of the range.
7797 			 */
7798 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7799 		}
7800 next:
7801 		if (ordered)
7802 			btrfs_put_ordered_extent(ordered);
7803 		/*
7804 		 * Qgroup reserved space handler
7805 		 * Sector(s) here will be either:
7806 		 *
7807 		 * 1) Already written to disk or bio already finished
7808 		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
7809 		 *    Qgroup will be handled by its qgroup_record then.
7810 		 *    btrfs_qgroup_free_data() call will do nothing here.
7811 		 *
7812 		 * 2) Not written to disk yet
7813 		 *    Then btrfs_qgroup_free_data() call will clear the
7814 		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
7815 		 *    reserved data space.
7816 		 *    Since the IO will never happen for this page.
7817 		 */
7818 		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
7819 		if (!inode_evicting)
7820 			btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
7821 					       EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
7822 					       EXTENT_DEFRAG | extra_flags,
7823 					       &cached_state);
7824 		cur = range_end + 1;
7825 	}
7826 	/*
7827 	 * We have iterated through all ordered extents of the page, the page
7828 	 * should not have Ordered anymore, or the above iteration
7829 	 * did something wrong.
7830 	 */
7831 	ASSERT(!folio_test_ordered(folio));
7832 	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
7833 	if (!inode_evicting)
7834 		__btrfs_release_folio(folio, GFP_NOFS);
7835 	clear_folio_extent_mapped(folio);
7836 }
7837 
btrfs_truncate(struct btrfs_inode * inode,bool skip_writeback)7838 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
7839 {
7840 	struct btrfs_truncate_control control = {
7841 		.inode = inode,
7842 		.ino = btrfs_ino(inode),
7843 		.min_type = BTRFS_EXTENT_DATA_KEY,
7844 		.clear_extent_range = true,
7845 		.new_size = inode->vfs_inode.i_size,
7846 	};
7847 	struct btrfs_root *root = inode->root;
7848 	struct btrfs_fs_info *fs_info = root->fs_info;
7849 	struct btrfs_block_rsv rsv;
7850 	int ret;
7851 	struct btrfs_trans_handle *trans;
7852 	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
7853 	const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize);
7854 	const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
7855 
7856 	/* Our inode is locked and the i_size can't be changed concurrently. */
7857 	btrfs_assert_inode_locked(inode);
7858 
7859 	if (!skip_writeback) {
7860 		ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1);
7861 		if (ret)
7862 			return ret;
7863 	}
7864 
7865 	/*
7866 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
7867 	 * things going on here:
7868 	 *
7869 	 * 1) We need to reserve space to update our inode.
7870 	 *
7871 	 * 2) We need to have something to cache all the space that is going to
7872 	 * be free'd up by the truncate operation, but also have some slack
7873 	 * space reserved in case it uses space during the truncate (thank you
7874 	 * very much snapshotting).
7875 	 *
7876 	 * And we need these to be separate.  The fact is we can use a lot of
7877 	 * space doing the truncate, and we have no earthly idea how much space
7878 	 * we will use, so we need the truncate reservation to be separate so it
7879 	 * doesn't end up using space reserved for updating the inode.  We also
7880 	 * need to be able to stop the transaction and start a new one, which
7881 	 * means we need to be able to update the inode several times, and we
7882 	 * have no idea of knowing how many times that will be, so we can't just
7883 	 * reserve 1 item for the entirety of the operation, so that has to be
7884 	 * done separately as well.
7885 	 *
7886 	 * So that leaves us with
7887 	 *
7888 	 * 1) rsv - for the truncate reservation, which we will steal from the
7889 	 * transaction reservation.
7890 	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
7891 	 * updating the inode.
7892 	 */
7893 	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
7894 	rsv.size = min_size;
7895 	rsv.failfast = true;
7896 
7897 	/*
7898 	 * 1 for the truncate slack space
7899 	 * 1 for updating the inode.
7900 	 */
7901 	trans = btrfs_start_transaction(root, 2);
7902 	if (IS_ERR(trans)) {
7903 		ret = PTR_ERR(trans);
7904 		goto out;
7905 	}
7906 
7907 	/* Migrate the slack space for the truncate to our reserve */
7908 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
7909 				      min_size, false);
7910 	/*
7911 	 * We have reserved 2 metadata units when we started the transaction and
7912 	 * min_size matches 1 unit, so this should never fail, but if it does,
7913 	 * it's not critical we just fail truncation.
7914 	 */
7915 	if (WARN_ON(ret)) {
7916 		btrfs_end_transaction(trans);
7917 		goto out;
7918 	}
7919 
7920 	trans->block_rsv = &rsv;
7921 
7922 	while (1) {
7923 		struct extent_state *cached_state = NULL;
7924 
7925 		btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7926 		/*
7927 		 * We want to drop from the next block forward in case this new
7928 		 * size is not block aligned since we will be keeping the last
7929 		 * block of the extent just the way it is.
7930 		 */
7931 		btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
7932 
7933 		ret = btrfs_truncate_inode_items(trans, root, &control);
7934 
7935 		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
7936 		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
7937 
7938 		btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7939 
7940 		trans->block_rsv = &fs_info->trans_block_rsv;
7941 		if (ret != -ENOSPC && ret != -EAGAIN)
7942 			break;
7943 
7944 		ret = btrfs_update_inode(trans, inode);
7945 		if (ret)
7946 			break;
7947 
7948 		btrfs_end_transaction(trans);
7949 		btrfs_btree_balance_dirty(fs_info);
7950 
7951 		trans = btrfs_start_transaction(root, 2);
7952 		if (IS_ERR(trans)) {
7953 			ret = PTR_ERR(trans);
7954 			trans = NULL;
7955 			break;
7956 		}
7957 
7958 		btrfs_block_rsv_release(fs_info, &rsv, -1, NULL);
7959 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
7960 					      &rsv, min_size, false);
7961 		/*
7962 		 * We have reserved 2 metadata units when we started the
7963 		 * transaction and min_size matches 1 unit, so this should never
7964 		 * fail, but if it does, it's not critical we just fail truncation.
7965 		 */
7966 		if (WARN_ON(ret))
7967 			break;
7968 
7969 		trans->block_rsv = &rsv;
7970 	}
7971 
7972 	/*
7973 	 * We can't call btrfs_truncate_block inside a trans handle as we could
7974 	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
7975 	 * know we've truncated everything except the last little bit, and can
7976 	 * do btrfs_truncate_block and then update the disk_i_size.
7977 	 */
7978 	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
7979 		btrfs_end_transaction(trans);
7980 		btrfs_btree_balance_dirty(fs_info);
7981 
7982 		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size,
7983 					   inode->vfs_inode.i_size, (u64)-1);
7984 		if (ret)
7985 			goto out;
7986 		trans = btrfs_start_transaction(root, 1);
7987 		if (IS_ERR(trans)) {
7988 			ret = PTR_ERR(trans);
7989 			goto out;
7990 		}
7991 		btrfs_inode_safe_disk_i_size_write(inode, 0);
7992 	}
7993 
7994 	if (trans) {
7995 		int ret2;
7996 
7997 		trans->block_rsv = &fs_info->trans_block_rsv;
7998 		ret2 = btrfs_update_inode(trans, inode);
7999 		if (ret2 && !ret)
8000 			ret = ret2;
8001 
8002 		ret2 = btrfs_end_transaction(trans);
8003 		if (ret2 && !ret)
8004 			ret = ret2;
8005 		btrfs_btree_balance_dirty(fs_info);
8006 	}
8007 out:
8008 	btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
8009 	/*
8010 	 * So if we truncate and then write and fsync we normally would just
8011 	 * write the extents that changed, which is a problem if we need to
8012 	 * first truncate that entire inode.  So set this flag so we write out
8013 	 * all of the extents in the inode to the sync log so we're completely
8014 	 * safe.
8015 	 *
8016 	 * If no extents were dropped or trimmed we don't need to force the next
8017 	 * fsync to truncate all the inode's items from the log and re-log them
8018 	 * all. This means the truncate operation did not change the file size,
8019 	 * or changed it to a smaller size but there was only an implicit hole
8020 	 * between the old i_size and the new i_size, and there were no prealloc
8021 	 * extents beyond i_size to drop.
8022 	 */
8023 	if (control.extents_found > 0)
8024 		btrfs_set_inode_full_sync(inode);
8025 
8026 	return ret;
8027 }
8028 
btrfs_new_subvol_inode(struct mnt_idmap * idmap,struct inode * dir)8029 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
8030 				     struct inode *dir)
8031 {
8032 	struct inode *inode;
8033 
8034 	inode = new_inode(dir->i_sb);
8035 	if (inode) {
8036 		/*
8037 		 * Subvolumes don't inherit the sgid bit or the parent's gid if
8038 		 * the parent's sgid bit is set. This is probably a bug.
8039 		 */
8040 		inode_init_owner(idmap, inode, NULL,
8041 				 S_IFDIR | (~current_umask() & S_IRWXUGO));
8042 		inode->i_op = &btrfs_dir_inode_operations;
8043 		inode->i_fop = &btrfs_dir_file_operations;
8044 	}
8045 	return inode;
8046 }
8047 
btrfs_alloc_inode(struct super_block * sb)8048 struct inode *btrfs_alloc_inode(struct super_block *sb)
8049 {
8050 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8051 	struct btrfs_inode *ei;
8052 	struct inode *inode;
8053 
8054 	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8055 	if (!ei)
8056 		return NULL;
8057 
8058 	ei->root = NULL;
8059 	ei->generation = 0;
8060 	ei->last_trans = 0;
8061 	ei->last_sub_trans = 0;
8062 	ei->logged_trans = 0;
8063 	ei->delalloc_bytes = 0;
8064 	/* new_delalloc_bytes and last_dir_index_offset are in a union. */
8065 	ei->new_delalloc_bytes = 0;
8066 	ei->defrag_bytes = 0;
8067 	ei->disk_i_size = 0;
8068 	ei->flags = 0;
8069 	ei->ro_flags = 0;
8070 	/*
8071 	 * ->index_cnt will be properly initialized later when creating a new
8072 	 * inode (btrfs_create_new_inode()) or when reading an existing inode
8073 	 * from disk (btrfs_read_locked_inode()).
8074 	 */
8075 	ei->csum_bytes = 0;
8076 	ei->dir_index = 0;
8077 	ei->last_unlink_trans = 0;
8078 	ei->last_reflink_trans = 0;
8079 	ei->last_log_commit = 0;
8080 
8081 	spin_lock_init(&ei->lock);
8082 	ei->outstanding_extents = 0;
8083 	if (sb->s_magic != BTRFS_TEST_MAGIC)
8084 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8085 					      BTRFS_BLOCK_RSV_DELALLOC);
8086 	ei->runtime_flags = 0;
8087 	ei->prop_compress = BTRFS_COMPRESS_NONE;
8088 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8089 
8090 	ei->delayed_node = NULL;
8091 
8092 	ei->i_otime_sec = 0;
8093 	ei->i_otime_nsec = 0;
8094 
8095 	inode = &ei->vfs_inode;
8096 	btrfs_extent_map_tree_init(&ei->extent_tree);
8097 
8098 	/* This io tree sets the valid inode. */
8099 	btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
8100 	ei->io_tree.inode = ei;
8101 
8102 	ei->file_extent_tree = NULL;
8103 
8104 	mutex_init(&ei->log_mutex);
8105 	spin_lock_init(&ei->ordered_tree_lock);
8106 	ei->ordered_tree = RB_ROOT;
8107 	ei->ordered_tree_last = NULL;
8108 	INIT_LIST_HEAD(&ei->delalloc_inodes);
8109 	INIT_LIST_HEAD(&ei->delayed_iput);
8110 	init_rwsem(&ei->i_mmap_lock);
8111 
8112 	return inode;
8113 }
8114 
8115 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
btrfs_test_destroy_inode(struct inode * inode)8116 void btrfs_test_destroy_inode(struct inode *inode)
8117 {
8118 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8119 	kfree(BTRFS_I(inode)->file_extent_tree);
8120 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8121 }
8122 #endif
8123 
btrfs_free_inode(struct inode * inode)8124 void btrfs_free_inode(struct inode *inode)
8125 {
8126 	kfree(BTRFS_I(inode)->file_extent_tree);
8127 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8128 }
8129 
btrfs_destroy_inode(struct inode * vfs_inode)8130 void btrfs_destroy_inode(struct inode *vfs_inode)
8131 {
8132 	struct btrfs_ordered_extent *ordered;
8133 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8134 	struct btrfs_root *root = inode->root;
8135 	bool freespace_inode;
8136 
8137 	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8138 	WARN_ON(vfs_inode->i_data.nrpages);
8139 	WARN_ON(inode->block_rsv.reserved);
8140 	WARN_ON(inode->block_rsv.size);
8141 	WARN_ON(inode->outstanding_extents);
8142 	if (!S_ISDIR(vfs_inode->i_mode)) {
8143 		WARN_ON(inode->delalloc_bytes);
8144 		WARN_ON(inode->new_delalloc_bytes);
8145 		WARN_ON(inode->csum_bytes);
8146 	}
8147 	if (!root || !btrfs_is_data_reloc_root(root))
8148 		WARN_ON(inode->defrag_bytes);
8149 
8150 	/*
8151 	 * This can happen where we create an inode, but somebody else also
8152 	 * created the same inode and we need to destroy the one we already
8153 	 * created.
8154 	 */
8155 	if (!root)
8156 		return;
8157 
8158 	/*
8159 	 * If this is a free space inode do not take the ordered extents lockdep
8160 	 * map.
8161 	 */
8162 	freespace_inode = btrfs_is_free_space_inode(inode);
8163 
8164 	while (1) {
8165 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8166 		if (!ordered)
8167 			break;
8168 		else {
8169 			btrfs_err(root->fs_info,
8170 				  "found ordered extent %llu %llu on inode cleanup",
8171 				  ordered->file_offset, ordered->num_bytes);
8172 
8173 			if (!freespace_inode)
8174 				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8175 
8176 			btrfs_remove_ordered_extent(inode, ordered);
8177 			btrfs_put_ordered_extent(ordered);
8178 			btrfs_put_ordered_extent(ordered);
8179 		}
8180 	}
8181 	btrfs_qgroup_check_reserved_leak(inode);
8182 	btrfs_del_inode_from_root(inode);
8183 	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8184 	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8185 	btrfs_put_root(inode->root);
8186 }
8187 
btrfs_drop_inode(struct inode * inode)8188 int btrfs_drop_inode(struct inode *inode)
8189 {
8190 	struct btrfs_root *root = BTRFS_I(inode)->root;
8191 
8192 	if (root == NULL)
8193 		return 1;
8194 
8195 	/* the snap/subvol tree is on deleting */
8196 	if (btrfs_root_refs(&root->root_item) == 0)
8197 		return 1;
8198 	else
8199 		return inode_generic_drop(inode);
8200 }
8201 
init_once(void * foo)8202 static void init_once(void *foo)
8203 {
8204 	struct btrfs_inode *ei = foo;
8205 
8206 	inode_init_once(&ei->vfs_inode);
8207 }
8208 
btrfs_destroy_cachep(void)8209 void __cold btrfs_destroy_cachep(void)
8210 {
8211 	/*
8212 	 * Make sure all delayed rcu free inodes are flushed before we
8213 	 * destroy cache.
8214 	 */
8215 	rcu_barrier();
8216 	kmem_cache_destroy(btrfs_inode_cachep);
8217 }
8218 
btrfs_init_cachep(void)8219 int __init btrfs_init_cachep(void)
8220 {
8221 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8222 			sizeof(struct btrfs_inode), 0,
8223 			SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
8224 			init_once);
8225 	if (!btrfs_inode_cachep)
8226 		return -ENOMEM;
8227 
8228 	return 0;
8229 }
8230 
btrfs_getattr(struct mnt_idmap * idmap,const struct path * path,struct kstat * stat,u32 request_mask,unsigned int flags)8231 static int btrfs_getattr(struct mnt_idmap *idmap,
8232 			 const struct path *path, struct kstat *stat,
8233 			 u32 request_mask, unsigned int flags)
8234 {
8235 	u64 delalloc_bytes;
8236 	u64 inode_bytes;
8237 	struct inode *inode = d_inode(path->dentry);
8238 	u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
8239 	u32 bi_flags = BTRFS_I(inode)->flags;
8240 	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8241 
8242 	stat->result_mask |= STATX_BTIME;
8243 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
8244 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
8245 	if (bi_flags & BTRFS_INODE_APPEND)
8246 		stat->attributes |= STATX_ATTR_APPEND;
8247 	if (bi_flags & BTRFS_INODE_COMPRESS)
8248 		stat->attributes |= STATX_ATTR_COMPRESSED;
8249 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
8250 		stat->attributes |= STATX_ATTR_IMMUTABLE;
8251 	if (bi_flags & BTRFS_INODE_NODUMP)
8252 		stat->attributes |= STATX_ATTR_NODUMP;
8253 	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8254 		stat->attributes |= STATX_ATTR_VERITY;
8255 
8256 	stat->attributes_mask |= (STATX_ATTR_APPEND |
8257 				  STATX_ATTR_COMPRESSED |
8258 				  STATX_ATTR_IMMUTABLE |
8259 				  STATX_ATTR_NODUMP);
8260 
8261 	generic_fillattr(idmap, request_mask, inode, stat);
8262 	stat->dev = BTRFS_I(inode)->root->anon_dev;
8263 
8264 	stat->subvol = btrfs_root_id(BTRFS_I(inode)->root);
8265 	stat->result_mask |= STATX_SUBVOL;
8266 
8267 	spin_lock(&BTRFS_I(inode)->lock);
8268 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8269 	inode_bytes = inode_get_bytes(inode);
8270 	spin_unlock(&BTRFS_I(inode)->lock);
8271 	stat->blocks = (ALIGN(inode_bytes, blocksize) +
8272 			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
8273 	return 0;
8274 }
8275 
btrfs_rename_exchange(struct inode * old_dir,struct dentry * old_dentry,struct inode * new_dir,struct dentry * new_dentry)8276 static int btrfs_rename_exchange(struct inode *old_dir,
8277 			      struct dentry *old_dentry,
8278 			      struct inode *new_dir,
8279 			      struct dentry *new_dentry)
8280 {
8281 	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8282 	struct btrfs_trans_handle *trans;
8283 	unsigned int trans_num_items;
8284 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8285 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8286 	struct inode *new_inode = new_dentry->d_inode;
8287 	struct inode *old_inode = old_dentry->d_inode;
8288 	struct btrfs_rename_ctx old_rename_ctx;
8289 	struct btrfs_rename_ctx new_rename_ctx;
8290 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8291 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8292 	u64 old_idx = 0;
8293 	u64 new_idx = 0;
8294 	int ret;
8295 	int ret2;
8296 	bool need_abort = false;
8297 	bool logs_pinned = false;
8298 	struct fscrypt_name old_fname, new_fname;
8299 	struct fscrypt_str *old_name, *new_name;
8300 
8301 	/*
8302 	 * For non-subvolumes allow exchange only within one subvolume, in the
8303 	 * same inode namespace. Two subvolumes (represented as directory) can
8304 	 * be exchanged as they're a logical link and have a fixed inode number.
8305 	 */
8306 	if (root != dest &&
8307 	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
8308 	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
8309 		return -EXDEV;
8310 
8311 	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8312 	if (ret)
8313 		return ret;
8314 
8315 	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8316 	if (ret) {
8317 		fscrypt_free_filename(&old_fname);
8318 		return ret;
8319 	}
8320 
8321 	old_name = &old_fname.disk_name;
8322 	new_name = &new_fname.disk_name;
8323 
8324 	/* close the race window with snapshot create/destroy ioctl */
8325 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8326 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
8327 		down_read(&fs_info->subvol_sem);
8328 
8329 	/*
8330 	 * For each inode:
8331 	 * 1 to remove old dir item
8332 	 * 1 to remove old dir index
8333 	 * 1 to add new dir item
8334 	 * 1 to add new dir index
8335 	 * 1 to update parent inode
8336 	 *
8337 	 * If the parents are the same, we only need to account for one
8338 	 */
8339 	trans_num_items = (old_dir == new_dir ? 9 : 10);
8340 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8341 		/*
8342 		 * 1 to remove old root ref
8343 		 * 1 to remove old root backref
8344 		 * 1 to add new root ref
8345 		 * 1 to add new root backref
8346 		 */
8347 		trans_num_items += 4;
8348 	} else {
8349 		/*
8350 		 * 1 to update inode item
8351 		 * 1 to remove old inode ref
8352 		 * 1 to add new inode ref
8353 		 */
8354 		trans_num_items += 3;
8355 	}
8356 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
8357 		trans_num_items += 4;
8358 	else
8359 		trans_num_items += 3;
8360 	trans = btrfs_start_transaction(root, trans_num_items);
8361 	if (IS_ERR(trans)) {
8362 		ret = PTR_ERR(trans);
8363 		goto out_notrans;
8364 	}
8365 
8366 	if (dest != root) {
8367 		ret = btrfs_record_root_in_trans(trans, dest);
8368 		if (ret)
8369 			goto out_fail;
8370 	}
8371 
8372 	/*
8373 	 * We need to find a free sequence number both in the source and
8374 	 * in the destination directory for the exchange.
8375 	 */
8376 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8377 	if (ret)
8378 		goto out_fail;
8379 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8380 	if (ret)
8381 		goto out_fail;
8382 
8383 	BTRFS_I(old_inode)->dir_index = 0ULL;
8384 	BTRFS_I(new_inode)->dir_index = 0ULL;
8385 
8386 	/* Reference for the source. */
8387 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8388 		/* force full log commit if subvolume involved. */
8389 		btrfs_set_log_full_commit(trans);
8390 	} else {
8391 		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8392 					     btrfs_ino(BTRFS_I(new_dir)),
8393 					     old_idx);
8394 		if (ret)
8395 			goto out_fail;
8396 		need_abort = true;
8397 	}
8398 
8399 	/* And now for the dest. */
8400 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8401 		/* force full log commit if subvolume involved. */
8402 		btrfs_set_log_full_commit(trans);
8403 	} else {
8404 		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8405 					     btrfs_ino(BTRFS_I(old_dir)),
8406 					     new_idx);
8407 		if (ret) {
8408 			if (unlikely(need_abort))
8409 				btrfs_abort_transaction(trans, ret);
8410 			goto out_fail;
8411 		}
8412 	}
8413 
8414 	/* Update inode version and ctime/mtime. */
8415 	inode_inc_iversion(old_dir);
8416 	inode_inc_iversion(new_dir);
8417 	inode_inc_iversion(old_inode);
8418 	inode_inc_iversion(new_inode);
8419 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8420 
8421 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
8422 	    new_ino != BTRFS_FIRST_FREE_OBJECTID) {
8423 		/*
8424 		 * If we are renaming in the same directory (and it's not for
8425 		 * root entries) pin the log early to prevent any concurrent
8426 		 * task from logging the directory after we removed the old
8427 		 * entries and before we add the new entries, otherwise that
8428 		 * task can sync a log without any entry for the inodes we are
8429 		 * renaming and therefore replaying that log, if a power failure
8430 		 * happens after syncing the log, would result in deleting the
8431 		 * inodes.
8432 		 *
8433 		 * If the rename affects two different directories, we want to
8434 		 * make sure the that there's no log commit that contains
8435 		 * updates for only one of the directories but not for the
8436 		 * other.
8437 		 *
8438 		 * If we are renaming an entry for a root, we don't care about
8439 		 * log updates since we called btrfs_set_log_full_commit().
8440 		 */
8441 		btrfs_pin_log_trans(root);
8442 		btrfs_pin_log_trans(dest);
8443 		logs_pinned = true;
8444 	}
8445 
8446 	if (old_dentry->d_parent != new_dentry->d_parent) {
8447 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8448 					BTRFS_I(old_inode), true);
8449 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8450 					BTRFS_I(new_inode), true);
8451 	}
8452 
8453 	/* src is a subvolume */
8454 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8455 		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8456 		if (unlikely(ret)) {
8457 			btrfs_abort_transaction(trans, ret);
8458 			goto out_fail;
8459 		}
8460 	} else { /* src is an inode */
8461 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8462 					   BTRFS_I(old_dentry->d_inode),
8463 					   old_name, &old_rename_ctx);
8464 		if (unlikely(ret)) {
8465 			btrfs_abort_transaction(trans, ret);
8466 			goto out_fail;
8467 		}
8468 		ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8469 		if (unlikely(ret)) {
8470 			btrfs_abort_transaction(trans, ret);
8471 			goto out_fail;
8472 		}
8473 	}
8474 
8475 	/* dest is a subvolume */
8476 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8477 		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8478 		if (unlikely(ret)) {
8479 			btrfs_abort_transaction(trans, ret);
8480 			goto out_fail;
8481 		}
8482 	} else { /* dest is an inode */
8483 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8484 					   BTRFS_I(new_dentry->d_inode),
8485 					   new_name, &new_rename_ctx);
8486 		if (unlikely(ret)) {
8487 			btrfs_abort_transaction(trans, ret);
8488 			goto out_fail;
8489 		}
8490 		ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8491 		if (unlikely(ret)) {
8492 			btrfs_abort_transaction(trans, ret);
8493 			goto out_fail;
8494 		}
8495 	}
8496 
8497 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8498 			     new_name, 0, old_idx);
8499 	if (unlikely(ret)) {
8500 		btrfs_abort_transaction(trans, ret);
8501 		goto out_fail;
8502 	}
8503 
8504 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8505 			     old_name, 0, new_idx);
8506 	if (unlikely(ret)) {
8507 		btrfs_abort_transaction(trans, ret);
8508 		goto out_fail;
8509 	}
8510 
8511 	if (old_inode->i_nlink == 1)
8512 		BTRFS_I(old_inode)->dir_index = old_idx;
8513 	if (new_inode->i_nlink == 1)
8514 		BTRFS_I(new_inode)->dir_index = new_idx;
8515 
8516 	/*
8517 	 * Do the log updates for all inodes.
8518 	 *
8519 	 * If either entry is for a root we don't need to update the logs since
8520 	 * we've called btrfs_set_log_full_commit() before.
8521 	 */
8522 	if (logs_pinned) {
8523 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8524 				   old_rename_ctx.index, new_dentry->d_parent);
8525 		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
8526 				   new_rename_ctx.index, old_dentry->d_parent);
8527 	}
8528 
8529 out_fail:
8530 	if (logs_pinned) {
8531 		btrfs_end_log_trans(root);
8532 		btrfs_end_log_trans(dest);
8533 	}
8534 	ret2 = btrfs_end_transaction(trans);
8535 	ret = ret ? ret : ret2;
8536 out_notrans:
8537 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
8538 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
8539 		up_read(&fs_info->subvol_sem);
8540 
8541 	fscrypt_free_filename(&new_fname);
8542 	fscrypt_free_filename(&old_fname);
8543 	return ret;
8544 }
8545 
new_whiteout_inode(struct mnt_idmap * idmap,struct inode * dir)8546 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
8547 					struct inode *dir)
8548 {
8549 	struct inode *inode;
8550 
8551 	inode = new_inode(dir->i_sb);
8552 	if (inode) {
8553 		inode_init_owner(idmap, inode, dir,
8554 				 S_IFCHR | WHITEOUT_MODE);
8555 		inode->i_op = &btrfs_special_inode_operations;
8556 		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
8557 	}
8558 	return inode;
8559 }
8560 
btrfs_rename(struct mnt_idmap * idmap,struct inode * old_dir,struct dentry * old_dentry,struct inode * new_dir,struct dentry * new_dentry,unsigned int flags)8561 static int btrfs_rename(struct mnt_idmap *idmap,
8562 			struct inode *old_dir, struct dentry *old_dentry,
8563 			struct inode *new_dir, struct dentry *new_dentry,
8564 			unsigned int flags)
8565 {
8566 	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8567 	struct btrfs_new_inode_args whiteout_args = {
8568 		.dir = old_dir,
8569 		.dentry = old_dentry,
8570 	};
8571 	struct btrfs_trans_handle *trans;
8572 	unsigned int trans_num_items;
8573 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8574 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8575 	struct inode *new_inode = d_inode(new_dentry);
8576 	struct inode *old_inode = d_inode(old_dentry);
8577 	struct btrfs_rename_ctx rename_ctx;
8578 	u64 index = 0;
8579 	int ret;
8580 	int ret2;
8581 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8582 	struct fscrypt_name old_fname, new_fname;
8583 	bool logs_pinned = false;
8584 
8585 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8586 		return -EPERM;
8587 
8588 	/* we only allow rename subvolume link between subvolumes */
8589 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8590 		return -EXDEV;
8591 
8592 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8593 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
8594 		return -ENOTEMPTY;
8595 
8596 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
8597 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8598 		return -ENOTEMPTY;
8599 
8600 	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8601 	if (ret)
8602 		return ret;
8603 
8604 	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8605 	if (ret) {
8606 		fscrypt_free_filename(&old_fname);
8607 		return ret;
8608 	}
8609 
8610 	/* check for collisions, even if the  name isn't there */
8611 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
8612 	if (ret) {
8613 		if (ret == -EEXIST) {
8614 			/* we shouldn't get
8615 			 * eexist without a new_inode */
8616 			if (WARN_ON(!new_inode)) {
8617 				goto out_fscrypt_names;
8618 			}
8619 		} else {
8620 			/* maybe -EOVERFLOW */
8621 			goto out_fscrypt_names;
8622 		}
8623 	}
8624 	ret = 0;
8625 
8626 	/*
8627 	 * we're using rename to replace one file with another.  Start IO on it
8628 	 * now so  we don't add too much work to the end of the transaction
8629 	 */
8630 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
8631 		filemap_flush(old_inode->i_mapping);
8632 
8633 	if (flags & RENAME_WHITEOUT) {
8634 		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
8635 		if (!whiteout_args.inode) {
8636 			ret = -ENOMEM;
8637 			goto out_fscrypt_names;
8638 		}
8639 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
8640 		if (ret)
8641 			goto out_whiteout_inode;
8642 	} else {
8643 		/* 1 to update the old parent inode. */
8644 		trans_num_items = 1;
8645 	}
8646 
8647 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8648 		/* Close the race window with snapshot create/destroy ioctl */
8649 		down_read(&fs_info->subvol_sem);
8650 		/*
8651 		 * 1 to remove old root ref
8652 		 * 1 to remove old root backref
8653 		 * 1 to add new root ref
8654 		 * 1 to add new root backref
8655 		 */
8656 		trans_num_items += 4;
8657 	} else {
8658 		/*
8659 		 * 1 to update inode
8660 		 * 1 to remove old inode ref
8661 		 * 1 to add new inode ref
8662 		 */
8663 		trans_num_items += 3;
8664 	}
8665 	/*
8666 	 * 1 to remove old dir item
8667 	 * 1 to remove old dir index
8668 	 * 1 to add new dir item
8669 	 * 1 to add new dir index
8670 	 */
8671 	trans_num_items += 4;
8672 	/* 1 to update new parent inode if it's not the same as the old parent */
8673 	if (new_dir != old_dir)
8674 		trans_num_items++;
8675 	if (new_inode) {
8676 		/*
8677 		 * 1 to update inode
8678 		 * 1 to remove inode ref
8679 		 * 1 to remove dir item
8680 		 * 1 to remove dir index
8681 		 * 1 to possibly add orphan item
8682 		 */
8683 		trans_num_items += 5;
8684 	}
8685 	trans = btrfs_start_transaction(root, trans_num_items);
8686 	if (IS_ERR(trans)) {
8687 		ret = PTR_ERR(trans);
8688 		goto out_notrans;
8689 	}
8690 
8691 	if (dest != root) {
8692 		ret = btrfs_record_root_in_trans(trans, dest);
8693 		if (ret)
8694 			goto out_fail;
8695 	}
8696 
8697 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
8698 	if (ret)
8699 		goto out_fail;
8700 
8701 	BTRFS_I(old_inode)->dir_index = 0ULL;
8702 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8703 		/* force full log commit if subvolume involved. */
8704 		btrfs_set_log_full_commit(trans);
8705 	} else {
8706 		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
8707 					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
8708 					     index);
8709 		if (ret)
8710 			goto out_fail;
8711 	}
8712 
8713 	inode_inc_iversion(old_dir);
8714 	inode_inc_iversion(new_dir);
8715 	inode_inc_iversion(old_inode);
8716 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8717 
8718 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
8719 		/*
8720 		 * If we are renaming in the same directory (and it's not a
8721 		 * root entry) pin the log to prevent any concurrent task from
8722 		 * logging the directory after we removed the old entry and
8723 		 * before we add the new entry, otherwise that task can sync
8724 		 * a log without any entry for the inode we are renaming and
8725 		 * therefore replaying that log, if a power failure happens
8726 		 * after syncing the log, would result in deleting the inode.
8727 		 *
8728 		 * If the rename affects two different directories, we want to
8729 		 * make sure the that there's no log commit that contains
8730 		 * updates for only one of the directories but not for the
8731 		 * other.
8732 		 *
8733 		 * If we are renaming an entry for a root, we don't care about
8734 		 * log updates since we called btrfs_set_log_full_commit().
8735 		 */
8736 		btrfs_pin_log_trans(root);
8737 		btrfs_pin_log_trans(dest);
8738 		logs_pinned = true;
8739 	}
8740 
8741 	if (old_dentry->d_parent != new_dentry->d_parent)
8742 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8743 					BTRFS_I(old_inode), true);
8744 
8745 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8746 		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8747 		if (unlikely(ret)) {
8748 			btrfs_abort_transaction(trans, ret);
8749 			goto out_fail;
8750 		}
8751 	} else {
8752 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8753 					   BTRFS_I(d_inode(old_dentry)),
8754 					   &old_fname.disk_name, &rename_ctx);
8755 		if (unlikely(ret)) {
8756 			btrfs_abort_transaction(trans, ret);
8757 			goto out_fail;
8758 		}
8759 		ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8760 		if (unlikely(ret)) {
8761 			btrfs_abort_transaction(trans, ret);
8762 			goto out_fail;
8763 		}
8764 	}
8765 
8766 	if (new_inode) {
8767 		inode_inc_iversion(new_inode);
8768 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
8769 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8770 			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8771 			if (unlikely(ret)) {
8772 				btrfs_abort_transaction(trans, ret);
8773 				goto out_fail;
8774 			}
8775 			BUG_ON(new_inode->i_nlink == 0);
8776 		} else {
8777 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8778 						 BTRFS_I(d_inode(new_dentry)),
8779 						 &new_fname.disk_name);
8780 			if (unlikely(ret)) {
8781 				btrfs_abort_transaction(trans, ret);
8782 				goto out_fail;
8783 			}
8784 		}
8785 		if (new_inode->i_nlink == 0) {
8786 			ret = btrfs_orphan_add(trans,
8787 					BTRFS_I(d_inode(new_dentry)));
8788 			if (unlikely(ret)) {
8789 				btrfs_abort_transaction(trans, ret);
8790 				goto out_fail;
8791 			}
8792 		}
8793 	}
8794 
8795 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8796 			     &new_fname.disk_name, 0, index);
8797 	if (unlikely(ret)) {
8798 		btrfs_abort_transaction(trans, ret);
8799 		goto out_fail;
8800 	}
8801 
8802 	if (old_inode->i_nlink == 1)
8803 		BTRFS_I(old_inode)->dir_index = index;
8804 
8805 	if (logs_pinned)
8806 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8807 				   rename_ctx.index, new_dentry->d_parent);
8808 
8809 	if (flags & RENAME_WHITEOUT) {
8810 		ret = btrfs_create_new_inode(trans, &whiteout_args);
8811 		if (unlikely(ret)) {
8812 			btrfs_abort_transaction(trans, ret);
8813 			goto out_fail;
8814 		} else {
8815 			unlock_new_inode(whiteout_args.inode);
8816 			iput(whiteout_args.inode);
8817 			whiteout_args.inode = NULL;
8818 		}
8819 	}
8820 out_fail:
8821 	if (logs_pinned) {
8822 		btrfs_end_log_trans(root);
8823 		btrfs_end_log_trans(dest);
8824 	}
8825 	ret2 = btrfs_end_transaction(trans);
8826 	ret = ret ? ret : ret2;
8827 out_notrans:
8828 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8829 		up_read(&fs_info->subvol_sem);
8830 	if (flags & RENAME_WHITEOUT)
8831 		btrfs_new_inode_args_destroy(&whiteout_args);
8832 out_whiteout_inode:
8833 	if (flags & RENAME_WHITEOUT)
8834 		iput(whiteout_args.inode);
8835 out_fscrypt_names:
8836 	fscrypt_free_filename(&old_fname);
8837 	fscrypt_free_filename(&new_fname);
8838 	return ret;
8839 }
8840 
btrfs_rename2(struct mnt_idmap * idmap,struct inode * old_dir,struct dentry * old_dentry,struct inode * new_dir,struct dentry * new_dentry,unsigned int flags)8841 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
8842 			 struct dentry *old_dentry, struct inode *new_dir,
8843 			 struct dentry *new_dentry, unsigned int flags)
8844 {
8845 	int ret;
8846 
8847 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
8848 		return -EINVAL;
8849 
8850 	if (flags & RENAME_EXCHANGE)
8851 		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
8852 					    new_dentry);
8853 	else
8854 		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
8855 				   new_dentry, flags);
8856 
8857 	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
8858 
8859 	return ret;
8860 }
8861 
8862 struct btrfs_delalloc_work {
8863 	struct inode *inode;
8864 	struct completion completion;
8865 	struct list_head list;
8866 	struct btrfs_work work;
8867 };
8868 
btrfs_run_delalloc_work(struct btrfs_work * work)8869 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8870 {
8871 	struct btrfs_delalloc_work *delalloc_work;
8872 	struct inode *inode;
8873 
8874 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
8875 				     work);
8876 	inode = delalloc_work->inode;
8877 	filemap_flush(inode->i_mapping);
8878 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8879 				&BTRFS_I(inode)->runtime_flags))
8880 		filemap_flush(inode->i_mapping);
8881 
8882 	iput(inode);
8883 	complete(&delalloc_work->completion);
8884 }
8885 
btrfs_alloc_delalloc_work(struct inode * inode)8886 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
8887 {
8888 	struct btrfs_delalloc_work *work;
8889 
8890 	work = kmalloc_obj(*work, GFP_NOFS);
8891 	if (!work)
8892 		return NULL;
8893 
8894 	init_completion(&work->completion);
8895 	INIT_LIST_HEAD(&work->list);
8896 	work->inode = inode;
8897 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
8898 
8899 	return work;
8900 }
8901 
8902 /*
8903  * some fairly slow code that needs optimization. This walks the list
8904  * of all the inodes with pending delalloc and forces them to disk.
8905  */
start_delalloc_inodes(struct btrfs_root * root,long * nr_to_write,bool snapshot,bool in_reclaim_context)8906 static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write,
8907 				 bool snapshot, bool in_reclaim_context)
8908 {
8909 	struct btrfs_delalloc_work *work, *next;
8910 	LIST_HEAD(works);
8911 	LIST_HEAD(splice);
8912 	int ret = 0;
8913 
8914 	mutex_lock(&root->delalloc_mutex);
8915 	spin_lock(&root->delalloc_lock);
8916 	list_splice_init(&root->delalloc_inodes, &splice);
8917 	while (!list_empty(&splice)) {
8918 		struct btrfs_inode *inode;
8919 		struct inode *tmp_inode;
8920 
8921 		inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes);
8922 
8923 		list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
8924 
8925 		if (in_reclaim_context &&
8926 		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags))
8927 			continue;
8928 
8929 		tmp_inode = igrab(&inode->vfs_inode);
8930 		if (!tmp_inode) {
8931 			cond_resched_lock(&root->delalloc_lock);
8932 			continue;
8933 		}
8934 		spin_unlock(&root->delalloc_lock);
8935 
8936 		if (snapshot)
8937 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags);
8938 		if (nr_to_write == NULL) {
8939 			work = btrfs_alloc_delalloc_work(tmp_inode);
8940 			if (!work) {
8941 				iput(tmp_inode);
8942 				ret = -ENOMEM;
8943 				goto out;
8944 			}
8945 			list_add_tail(&work->list, &works);
8946 			btrfs_queue_work(root->fs_info->flush_workers,
8947 					 &work->work);
8948 		} else {
8949 			ret = filemap_flush_nr(tmp_inode->i_mapping,
8950 					nr_to_write);
8951 			btrfs_add_delayed_iput(inode);
8952 
8953 			if (ret || *nr_to_write <= 0)
8954 				goto out;
8955 		}
8956 		cond_resched();
8957 		spin_lock(&root->delalloc_lock);
8958 	}
8959 	spin_unlock(&root->delalloc_lock);
8960 
8961 out:
8962 	list_for_each_entry_safe(work, next, &works, list) {
8963 		list_del_init(&work->list);
8964 		wait_for_completion(&work->completion);
8965 		kfree(work);
8966 	}
8967 
8968 	if (!list_empty(&splice)) {
8969 		spin_lock(&root->delalloc_lock);
8970 		list_splice_tail(&splice, &root->delalloc_inodes);
8971 		spin_unlock(&root->delalloc_lock);
8972 	}
8973 	mutex_unlock(&root->delalloc_mutex);
8974 	return ret;
8975 }
8976 
btrfs_start_delalloc_snapshot(struct btrfs_root * root,bool in_reclaim_context)8977 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
8978 {
8979 	struct btrfs_fs_info *fs_info = root->fs_info;
8980 
8981 	if (BTRFS_FS_ERROR(fs_info))
8982 		return -EROFS;
8983 	return start_delalloc_inodes(root, NULL, true, in_reclaim_context);
8984 }
8985 
btrfs_start_delalloc_roots(struct btrfs_fs_info * fs_info,long nr,bool in_reclaim_context)8986 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
8987 			       bool in_reclaim_context)
8988 {
8989 	long *nr_to_write = nr == LONG_MAX ? NULL : &nr;
8990 	struct btrfs_root *root;
8991 	LIST_HEAD(splice);
8992 	int ret;
8993 
8994 	if (BTRFS_FS_ERROR(fs_info))
8995 		return -EROFS;
8996 
8997 	mutex_lock(&fs_info->delalloc_root_mutex);
8998 	spin_lock(&fs_info->delalloc_root_lock);
8999 	list_splice_init(&fs_info->delalloc_roots, &splice);
9000 	while (!list_empty(&splice)) {
9001 		root = list_first_entry(&splice, struct btrfs_root,
9002 					delalloc_root);
9003 		root = btrfs_grab_root(root);
9004 		BUG_ON(!root);
9005 		list_move_tail(&root->delalloc_root,
9006 			       &fs_info->delalloc_roots);
9007 		spin_unlock(&fs_info->delalloc_root_lock);
9008 
9009 		ret = start_delalloc_inodes(root, nr_to_write, false,
9010 				in_reclaim_context);
9011 		btrfs_put_root(root);
9012 		if (ret < 0 || nr <= 0)
9013 			goto out;
9014 		spin_lock(&fs_info->delalloc_root_lock);
9015 	}
9016 	spin_unlock(&fs_info->delalloc_root_lock);
9017 
9018 	ret = 0;
9019 out:
9020 	if (!list_empty(&splice)) {
9021 		spin_lock(&fs_info->delalloc_root_lock);
9022 		list_splice_tail(&splice, &fs_info->delalloc_roots);
9023 		spin_unlock(&fs_info->delalloc_root_lock);
9024 	}
9025 	mutex_unlock(&fs_info->delalloc_root_mutex);
9026 	return ret;
9027 }
9028 
btrfs_symlink(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,const char * symname)9029 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
9030 			 struct dentry *dentry, const char *symname)
9031 {
9032 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9033 	struct btrfs_trans_handle *trans;
9034 	struct btrfs_root *root = BTRFS_I(dir)->root;
9035 	struct btrfs_path *path;
9036 	struct btrfs_key key;
9037 	struct inode *inode;
9038 	struct btrfs_new_inode_args new_inode_args = {
9039 		.dir = dir,
9040 		.dentry = dentry,
9041 	};
9042 	unsigned int trans_num_items;
9043 	int ret;
9044 	int name_len;
9045 	int datasize;
9046 	unsigned long ptr;
9047 	struct btrfs_file_extent_item *ei;
9048 	struct extent_buffer *leaf;
9049 
9050 	name_len = strlen(symname);
9051 	/*
9052 	 * Symlinks utilize uncompressed inline extent data, which should not
9053 	 * reach block size.
9054 	 */
9055 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
9056 	    name_len >= fs_info->sectorsize)
9057 		return -ENAMETOOLONG;
9058 
9059 	inode = new_inode(dir->i_sb);
9060 	if (!inode)
9061 		return -ENOMEM;
9062 	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
9063 	inode->i_op = &btrfs_symlink_inode_operations;
9064 	inode_nohighmem(inode);
9065 	inode->i_mapping->a_ops = &btrfs_aops;
9066 	btrfs_i_size_write(BTRFS_I(inode), name_len);
9067 	inode_set_bytes(inode, name_len);
9068 
9069 	new_inode_args.inode = inode;
9070 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9071 	if (ret)
9072 		goto out_inode;
9073 	/* 1 additional item for the inline extent */
9074 	trans_num_items++;
9075 
9076 	trans = btrfs_start_transaction(root, trans_num_items);
9077 	if (IS_ERR(trans)) {
9078 		ret = PTR_ERR(trans);
9079 		goto out_new_inode_args;
9080 	}
9081 
9082 	ret = btrfs_create_new_inode(trans, &new_inode_args);
9083 	if (ret)
9084 		goto out;
9085 
9086 	path = btrfs_alloc_path();
9087 	if (unlikely(!path)) {
9088 		ret = -ENOMEM;
9089 		btrfs_abort_transaction(trans, ret);
9090 		discard_new_inode(inode);
9091 		inode = NULL;
9092 		goto out;
9093 	}
9094 	key.objectid = btrfs_ino(BTRFS_I(inode));
9095 	key.type = BTRFS_EXTENT_DATA_KEY;
9096 	key.offset = 0;
9097 	datasize = btrfs_file_extent_calc_inline_size(name_len);
9098 	ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
9099 	if (unlikely(ret)) {
9100 		btrfs_abort_transaction(trans, ret);
9101 		btrfs_free_path(path);
9102 		discard_new_inode(inode);
9103 		inode = NULL;
9104 		goto out;
9105 	}
9106 	leaf = path->nodes[0];
9107 	ei = btrfs_item_ptr(leaf, path->slots[0],
9108 			    struct btrfs_file_extent_item);
9109 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9110 	btrfs_set_file_extent_type(leaf, ei,
9111 				   BTRFS_FILE_EXTENT_INLINE);
9112 	btrfs_set_file_extent_encryption(leaf, ei, 0);
9113 	btrfs_set_file_extent_compression(leaf, ei, 0);
9114 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9115 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9116 
9117 	ptr = btrfs_file_extent_inline_start(ei);
9118 	write_extent_buffer(leaf, symname, ptr, name_len);
9119 	btrfs_free_path(path);
9120 
9121 	d_instantiate_new(dentry, inode);
9122 	ret = 0;
9123 out:
9124 	btrfs_end_transaction(trans);
9125 	btrfs_btree_balance_dirty(fs_info);
9126 out_new_inode_args:
9127 	btrfs_new_inode_args_destroy(&new_inode_args);
9128 out_inode:
9129 	if (ret)
9130 		iput(inode);
9131 	return ret;
9132 }
9133 
insert_prealloc_file_extent(struct btrfs_trans_handle * trans_in,struct btrfs_inode * inode,struct btrfs_key * ins,u64 file_offset)9134 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9135 				       struct btrfs_trans_handle *trans_in,
9136 				       struct btrfs_inode *inode,
9137 				       struct btrfs_key *ins,
9138 				       u64 file_offset)
9139 {
9140 	struct btrfs_file_extent_item stack_fi;
9141 	struct btrfs_replace_extent_info extent_info;
9142 	struct btrfs_trans_handle *trans = trans_in;
9143 	struct btrfs_path *path;
9144 	u64 start = ins->objectid;
9145 	u64 len = ins->offset;
9146 	u64 qgroup_released = 0;
9147 	int ret;
9148 
9149 	memset(&stack_fi, 0, sizeof(stack_fi));
9150 
9151 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9152 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9153 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9154 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9155 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9156 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9157 	/* Encryption and other encoding is reserved and all 0 */
9158 
9159 	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
9160 	if (ret < 0)
9161 		return ERR_PTR(ret);
9162 
9163 	if (trans) {
9164 		ret = insert_reserved_file_extent(trans, inode,
9165 						  file_offset, &stack_fi,
9166 						  true, qgroup_released);
9167 		if (ret)
9168 			goto free_qgroup;
9169 		return trans;
9170 	}
9171 
9172 	extent_info.disk_offset = start;
9173 	extent_info.disk_len = len;
9174 	extent_info.data_offset = 0;
9175 	extent_info.data_len = len;
9176 	extent_info.file_offset = file_offset;
9177 	extent_info.extent_buf = (char *)&stack_fi;
9178 	extent_info.is_new_extent = true;
9179 	extent_info.update_times = true;
9180 	extent_info.qgroup_reserved = qgroup_released;
9181 	extent_info.insertions = 0;
9182 
9183 	path = btrfs_alloc_path();
9184 	if (!path) {
9185 		ret = -ENOMEM;
9186 		goto free_qgroup;
9187 	}
9188 
9189 	ret = btrfs_replace_file_extents(inode, path, file_offset,
9190 				     file_offset + len - 1, &extent_info,
9191 				     &trans);
9192 	btrfs_free_path(path);
9193 	if (ret)
9194 		goto free_qgroup;
9195 	return trans;
9196 
9197 free_qgroup:
9198 	/*
9199 	 * We have released qgroup data range at the beginning of the function,
9200 	 * and normally qgroup_released bytes will be freed when committing
9201 	 * transaction.
9202 	 * But if we error out early, we have to free what we have released
9203 	 * or we leak qgroup data reservation.
9204 	 */
9205 	btrfs_qgroup_free_refroot(inode->root->fs_info,
9206 			btrfs_root_id(inode->root), qgroup_released,
9207 			BTRFS_QGROUP_RSV_DATA);
9208 	return ERR_PTR(ret);
9209 }
9210 
__btrfs_prealloc_file_range(struct inode * inode,int mode,u64 start,u64 num_bytes,u64 min_size,loff_t actual_len,u64 * alloc_hint,struct btrfs_trans_handle * trans)9211 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9212 				       u64 start, u64 num_bytes, u64 min_size,
9213 				       loff_t actual_len, u64 *alloc_hint,
9214 				       struct btrfs_trans_handle *trans)
9215 {
9216 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
9217 	struct extent_map *em;
9218 	struct btrfs_root *root = BTRFS_I(inode)->root;
9219 	struct btrfs_key ins;
9220 	u64 cur_offset = start;
9221 	u64 clear_offset = start;
9222 	u64 i_size;
9223 	u64 cur_bytes;
9224 	u64 last_alloc = (u64)-1;
9225 	int ret = 0;
9226 	bool own_trans = true;
9227 	u64 end = start + num_bytes - 1;
9228 
9229 	if (trans)
9230 		own_trans = false;
9231 	while (num_bytes > 0) {
9232 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9233 		cur_bytes = max(cur_bytes, min_size);
9234 		/*
9235 		 * If we are severely fragmented we could end up with really
9236 		 * small allocations, so if the allocator is returning small
9237 		 * chunks lets make its job easier by only searching for those
9238 		 * sized chunks.
9239 		 */
9240 		cur_bytes = min(cur_bytes, last_alloc);
9241 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9242 				min_size, 0, *alloc_hint, &ins, true, false);
9243 		if (ret)
9244 			break;
9245 
9246 		/*
9247 		 * We've reserved this space, and thus converted it from
9248 		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
9249 		 * from here on out we will only need to clear our reservation
9250 		 * for the remaining unreserved area, so advance our
9251 		 * clear_offset by our extent size.
9252 		 */
9253 		clear_offset += ins.offset;
9254 
9255 		last_alloc = ins.offset;
9256 		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9257 						    &ins, cur_offset);
9258 		/*
9259 		 * Now that we inserted the prealloc extent we can finally
9260 		 * decrement the number of reservations in the block group.
9261 		 * If we did it before, we could race with relocation and have
9262 		 * relocation miss the reserved extent, making it fail later.
9263 		 */
9264 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9265 		if (IS_ERR(trans)) {
9266 			ret = PTR_ERR(trans);
9267 			btrfs_free_reserved_extent(fs_info, ins.objectid,
9268 						   ins.offset, false);
9269 			break;
9270 		}
9271 
9272 		em = btrfs_alloc_extent_map();
9273 		if (!em) {
9274 			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
9275 					    cur_offset + ins.offset - 1, false);
9276 			btrfs_set_inode_full_sync(BTRFS_I(inode));
9277 			goto next;
9278 		}
9279 
9280 		em->start = cur_offset;
9281 		em->len = ins.offset;
9282 		em->disk_bytenr = ins.objectid;
9283 		em->offset = 0;
9284 		em->disk_num_bytes = ins.offset;
9285 		em->ram_bytes = ins.offset;
9286 		em->flags |= EXTENT_FLAG_PREALLOC;
9287 		em->generation = trans->transid;
9288 
9289 		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
9290 		btrfs_free_extent_map(em);
9291 next:
9292 		num_bytes -= ins.offset;
9293 		cur_offset += ins.offset;
9294 		*alloc_hint = ins.objectid + ins.offset;
9295 
9296 		inode_inc_iversion(inode);
9297 		inode_set_ctime_current(inode);
9298 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9299 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9300 		    (actual_len > inode->i_size) &&
9301 		    (cur_offset > inode->i_size)) {
9302 			if (cur_offset > actual_len)
9303 				i_size = actual_len;
9304 			else
9305 				i_size = cur_offset;
9306 			i_size_write(inode, i_size);
9307 			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
9308 		}
9309 
9310 		ret = btrfs_update_inode(trans, BTRFS_I(inode));
9311 
9312 		if (unlikely(ret)) {
9313 			btrfs_abort_transaction(trans, ret);
9314 			if (own_trans)
9315 				btrfs_end_transaction(trans);
9316 			break;
9317 		}
9318 
9319 		if (own_trans) {
9320 			btrfs_end_transaction(trans);
9321 			trans = NULL;
9322 		}
9323 	}
9324 	if (clear_offset < end)
9325 		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
9326 			end - clear_offset + 1);
9327 	return ret;
9328 }
9329 
btrfs_prealloc_file_range(struct inode * inode,int mode,u64 start,u64 num_bytes,u64 min_size,loff_t actual_len,u64 * alloc_hint)9330 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9331 			      u64 start, u64 num_bytes, u64 min_size,
9332 			      loff_t actual_len, u64 *alloc_hint)
9333 {
9334 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9335 					   min_size, actual_len, alloc_hint,
9336 					   NULL);
9337 }
9338 
btrfs_prealloc_file_range_trans(struct inode * inode,struct btrfs_trans_handle * trans,int mode,u64 start,u64 num_bytes,u64 min_size,loff_t actual_len,u64 * alloc_hint)9339 int btrfs_prealloc_file_range_trans(struct inode *inode,
9340 				    struct btrfs_trans_handle *trans, int mode,
9341 				    u64 start, u64 num_bytes, u64 min_size,
9342 				    loff_t actual_len, u64 *alloc_hint)
9343 {
9344 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9345 					   min_size, actual_len, alloc_hint, trans);
9346 }
9347 
9348 /*
9349  * NOTE: in case you are adding MAY_EXEC check for directories:
9350  * we are marking them with IOP_FASTPERM_MAY_EXEC, allowing path lookup to
9351  * elide calls here.
9352  */
btrfs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)9353 static int btrfs_permission(struct mnt_idmap *idmap,
9354 			    struct inode *inode, int mask)
9355 {
9356 	struct btrfs_root *root = BTRFS_I(inode)->root;
9357 	umode_t mode = inode->i_mode;
9358 
9359 	if (mask & MAY_WRITE &&
9360 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9361 		if (btrfs_root_readonly(root))
9362 			return -EROFS;
9363 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9364 			return -EACCES;
9365 	}
9366 	return generic_permission(idmap, inode, mask);
9367 }
9368 
btrfs_tmpfile(struct mnt_idmap * idmap,struct inode * dir,struct file * file,umode_t mode)9369 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
9370 			 struct file *file, umode_t mode)
9371 {
9372 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9373 	struct btrfs_trans_handle *trans;
9374 	struct btrfs_root *root = BTRFS_I(dir)->root;
9375 	struct inode *inode;
9376 	struct btrfs_new_inode_args new_inode_args = {
9377 		.dir = dir,
9378 		.dentry = file->f_path.dentry,
9379 		.orphan = true,
9380 	};
9381 	unsigned int trans_num_items;
9382 	int ret;
9383 
9384 	inode = new_inode(dir->i_sb);
9385 	if (!inode)
9386 		return -ENOMEM;
9387 	inode_init_owner(idmap, inode, dir, mode);
9388 	inode->i_fop = &btrfs_file_operations;
9389 	inode->i_op = &btrfs_file_inode_operations;
9390 	inode->i_mapping->a_ops = &btrfs_aops;
9391 
9392 	new_inode_args.inode = inode;
9393 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9394 	if (ret)
9395 		goto out_inode;
9396 
9397 	trans = btrfs_start_transaction(root, trans_num_items);
9398 	if (IS_ERR(trans)) {
9399 		ret = PTR_ERR(trans);
9400 		goto out_new_inode_args;
9401 	}
9402 
9403 	ret = btrfs_create_new_inode(trans, &new_inode_args);
9404 
9405 	/*
9406 	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
9407 	 * set it to 1 because d_tmpfile() will issue a warning if the count is
9408 	 * 0, through:
9409 	 *
9410 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9411 	 */
9412 	set_nlink(inode, 1);
9413 
9414 	if (!ret) {
9415 		d_tmpfile(file, inode);
9416 		unlock_new_inode(inode);
9417 		mark_inode_dirty(inode);
9418 	}
9419 
9420 	btrfs_end_transaction(trans);
9421 	btrfs_btree_balance_dirty(fs_info);
9422 out_new_inode_args:
9423 	btrfs_new_inode_args_destroy(&new_inode_args);
9424 out_inode:
9425 	if (ret)
9426 		iput(inode);
9427 	return finish_open_simple(file, ret);
9428 }
9429 
btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info * fs_info,int compress_type)9430 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
9431 					     int compress_type)
9432 {
9433 	switch (compress_type) {
9434 	case BTRFS_COMPRESS_NONE:
9435 		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
9436 	case BTRFS_COMPRESS_ZLIB:
9437 		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
9438 	case BTRFS_COMPRESS_LZO:
9439 		/*
9440 		 * The LZO format depends on the sector size. 64K is the maximum
9441 		 * sector size that we support.
9442 		 */
9443 		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
9444 			return -EINVAL;
9445 		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
9446 		       (fs_info->sectorsize_bits - 12);
9447 	case BTRFS_COMPRESS_ZSTD:
9448 		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
9449 	default:
9450 		return -EUCLEAN;
9451 	}
9452 }
9453 
btrfs_encoded_read_inline(struct kiocb * iocb,struct iov_iter * iter,u64 start,u64 lockend,struct extent_state ** cached_state,u64 extent_start,size_t count,struct btrfs_ioctl_encoded_io_args * encoded,bool * unlocked)9454 static ssize_t btrfs_encoded_read_inline(
9455 				struct kiocb *iocb,
9456 				struct iov_iter *iter, u64 start,
9457 				u64 lockend,
9458 				struct extent_state **cached_state,
9459 				u64 extent_start, size_t count,
9460 				struct btrfs_ioctl_encoded_io_args *encoded,
9461 				bool *unlocked)
9462 {
9463 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9464 	struct btrfs_root *root = inode->root;
9465 	struct btrfs_fs_info *fs_info = root->fs_info;
9466 	struct extent_io_tree *io_tree = &inode->io_tree;
9467 	BTRFS_PATH_AUTO_FREE(path);
9468 	struct extent_buffer *leaf;
9469 	struct btrfs_file_extent_item *item;
9470 	u64 ram_bytes;
9471 	unsigned long ptr;
9472 	void *tmp;
9473 	ssize_t ret;
9474 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
9475 
9476 	path = btrfs_alloc_path();
9477 	if (!path)
9478 		return -ENOMEM;
9479 
9480 	path->nowait = nowait;
9481 
9482 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9483 				       extent_start, 0);
9484 	if (ret) {
9485 		if (unlikely(ret > 0)) {
9486 			/* The extent item disappeared? */
9487 			return -EIO;
9488 		}
9489 		return ret;
9490 	}
9491 	leaf = path->nodes[0];
9492 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9493 
9494 	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9495 	ptr = btrfs_file_extent_inline_start(item);
9496 
9497 	encoded->len = min_t(u64, extent_start + ram_bytes,
9498 			     inode->vfs_inode.i_size) - iocb->ki_pos;
9499 	ret = btrfs_encoded_io_compression_from_extent(fs_info,
9500 				 btrfs_file_extent_compression(leaf, item));
9501 	if (ret < 0)
9502 		return ret;
9503 	encoded->compression = ret;
9504 	if (encoded->compression) {
9505 		size_t inline_size;
9506 
9507 		inline_size = btrfs_file_extent_inline_item_len(leaf,
9508 								path->slots[0]);
9509 		if (inline_size > count)
9510 			return -ENOBUFS;
9511 
9512 		count = inline_size;
9513 		encoded->unencoded_len = ram_bytes;
9514 		encoded->unencoded_offset = iocb->ki_pos - extent_start;
9515 	} else {
9516 		count = min_t(u64, count, encoded->len);
9517 		encoded->len = count;
9518 		encoded->unencoded_len = count;
9519 		ptr += iocb->ki_pos - extent_start;
9520 	}
9521 
9522 	tmp = kmalloc(count, GFP_NOFS);
9523 	if (!tmp)
9524 		return -ENOMEM;
9525 
9526 	read_extent_buffer(leaf, tmp, ptr, count);
9527 	btrfs_release_path(path);
9528 	btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9529 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9530 	*unlocked = true;
9531 
9532 	ret = copy_to_iter(tmp, count, iter);
9533 	if (ret != count)
9534 		ret = -EFAULT;
9535 	kfree(tmp);
9536 
9537 	return ret;
9538 }
9539 
9540 struct btrfs_encoded_read_private {
9541 	struct completion *sync_reads;
9542 	void *uring_ctx;
9543 	refcount_t pending_refs;
9544 	blk_status_t status;
9545 };
9546 
btrfs_encoded_read_endio(struct btrfs_bio * bbio)9547 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
9548 {
9549 	struct btrfs_encoded_read_private *priv = bbio->private;
9550 
9551 	if (bbio->bio.bi_status) {
9552 		/*
9553 		 * The memory barrier implied by the refcount_dec_and_test() here
9554 		 * pairs with the memory barrier implied by the refcount_dec_and_test()
9555 		 * in btrfs_encoded_read_regular_fill_pages() to ensure that
9556 		 * this write is observed before the load of status in
9557 		 * btrfs_encoded_read_regular_fill_pages().
9558 		 */
9559 		WRITE_ONCE(priv->status, bbio->bio.bi_status);
9560 	}
9561 	if (refcount_dec_and_test(&priv->pending_refs)) {
9562 		int err = blk_status_to_errno(READ_ONCE(priv->status));
9563 
9564 		if (priv->uring_ctx) {
9565 			btrfs_uring_read_extent_endio(priv->uring_ctx, err);
9566 			kfree(priv);
9567 		} else {
9568 			complete(priv->sync_reads);
9569 		}
9570 	}
9571 	bio_put(&bbio->bio);
9572 }
9573 
btrfs_encoded_read_regular_fill_pages(struct btrfs_inode * inode,u64 disk_bytenr,u64 disk_io_size,struct page ** pages,void * uring_ctx)9574 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
9575 					  u64 disk_bytenr, u64 disk_io_size,
9576 					  struct page **pages, void *uring_ctx)
9577 {
9578 	struct btrfs_encoded_read_private *priv, sync_priv;
9579 	struct completion sync_reads;
9580 	unsigned long i = 0;
9581 	struct btrfs_bio *bbio;
9582 	int ret;
9583 
9584 	/*
9585 	 * Fast path for synchronous reads which completes in this call, io_uring
9586 	 * needs longer time span.
9587 	 */
9588 	if (uring_ctx) {
9589 		priv = kmalloc_obj(struct btrfs_encoded_read_private, GFP_NOFS);
9590 		if (!priv)
9591 			return -ENOMEM;
9592 	} else {
9593 		priv = &sync_priv;
9594 		init_completion(&sync_reads);
9595 		priv->sync_reads = &sync_reads;
9596 	}
9597 
9598 	refcount_set(&priv->pending_refs, 1);
9599 	priv->status = 0;
9600 	priv->uring_ctx = uring_ctx;
9601 
9602 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
9603 			       btrfs_encoded_read_endio, priv);
9604 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9605 
9606 	do {
9607 		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
9608 
9609 		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
9610 			refcount_inc(&priv->pending_refs);
9611 			btrfs_submit_bbio(bbio, 0);
9612 
9613 			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
9614 					       btrfs_encoded_read_endio, priv);
9615 			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9616 			continue;
9617 		}
9618 
9619 		i++;
9620 		disk_bytenr += bytes;
9621 		disk_io_size -= bytes;
9622 	} while (disk_io_size);
9623 
9624 	refcount_inc(&priv->pending_refs);
9625 	btrfs_submit_bbio(bbio, 0);
9626 
9627 	if (uring_ctx) {
9628 		if (refcount_dec_and_test(&priv->pending_refs)) {
9629 			ret = blk_status_to_errno(READ_ONCE(priv->status));
9630 			btrfs_uring_read_extent_endio(uring_ctx, ret);
9631 			kfree(priv);
9632 			return ret;
9633 		}
9634 
9635 		return -EIOCBQUEUED;
9636 	} else {
9637 		if (!refcount_dec_and_test(&priv->pending_refs))
9638 			wait_for_completion_io(&sync_reads);
9639 		/* See btrfs_encoded_read_endio() for ordering. */
9640 		return blk_status_to_errno(READ_ONCE(priv->status));
9641 	}
9642 }
9643 
btrfs_encoded_read_regular(struct kiocb * iocb,struct iov_iter * iter,u64 start,u64 lockend,struct extent_state ** cached_state,u64 disk_bytenr,u64 disk_io_size,size_t count,bool compressed,bool * unlocked)9644 ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
9645 				   u64 start, u64 lockend,
9646 				   struct extent_state **cached_state,
9647 				   u64 disk_bytenr, u64 disk_io_size,
9648 				   size_t count, bool compressed, bool *unlocked)
9649 {
9650 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9651 	struct extent_io_tree *io_tree = &inode->io_tree;
9652 	struct page **pages;
9653 	unsigned long nr_pages, i;
9654 	u64 cur;
9655 	size_t page_offset;
9656 	ssize_t ret;
9657 
9658 	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
9659 	pages = kzalloc_objs(struct page *, nr_pages, GFP_NOFS);
9660 	if (!pages)
9661 		return -ENOMEM;
9662 	ret = btrfs_alloc_page_array(nr_pages, pages, false);
9663 	if (ret) {
9664 		ret = -ENOMEM;
9665 		goto out;
9666 		}
9667 
9668 	ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
9669 						    disk_io_size, pages, NULL);
9670 	if (ret)
9671 		goto out;
9672 
9673 	btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9674 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9675 	*unlocked = true;
9676 
9677 	if (compressed) {
9678 		i = 0;
9679 		page_offset = 0;
9680 	} else {
9681 		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
9682 		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
9683 	}
9684 	cur = 0;
9685 	while (cur < count) {
9686 		size_t bytes = min_t(size_t, count - cur,
9687 				     PAGE_SIZE - page_offset);
9688 
9689 		if (copy_page_to_iter(pages[i], page_offset, bytes,
9690 				      iter) != bytes) {
9691 			ret = -EFAULT;
9692 			goto out;
9693 		}
9694 		i++;
9695 		cur += bytes;
9696 		page_offset = 0;
9697 	}
9698 	ret = count;
9699 out:
9700 	for (i = 0; i < nr_pages; i++) {
9701 		if (pages[i])
9702 			__free_page(pages[i]);
9703 	}
9704 	kfree(pages);
9705 	return ret;
9706 }
9707 
btrfs_encoded_read(struct kiocb * iocb,struct iov_iter * iter,struct btrfs_ioctl_encoded_io_args * encoded,struct extent_state ** cached_state,u64 * disk_bytenr,u64 * disk_io_size)9708 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
9709 			   struct btrfs_ioctl_encoded_io_args *encoded,
9710 			   struct extent_state **cached_state,
9711 			   u64 *disk_bytenr, u64 *disk_io_size)
9712 {
9713 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9714 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
9715 	struct extent_io_tree *io_tree = &inode->io_tree;
9716 	ssize_t ret;
9717 	size_t count = iov_iter_count(iter);
9718 	u64 start, lockend;
9719 	struct extent_map *em;
9720 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
9721 	bool unlocked = false;
9722 
9723 	file_accessed(iocb->ki_filp);
9724 
9725 	ret = btrfs_inode_lock(inode,
9726 			       BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
9727 	if (ret)
9728 		return ret;
9729 
9730 	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
9731 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9732 		return 0;
9733 	}
9734 	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
9735 	/*
9736 	 * We don't know how long the extent containing iocb->ki_pos is, but if
9737 	 * it's compressed we know that it won't be longer than this.
9738 	 */
9739 	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
9740 
9741 	if (nowait) {
9742 		struct btrfs_ordered_extent *ordered;
9743 
9744 		if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
9745 						  start, lockend)) {
9746 			ret = -EAGAIN;
9747 			goto out_unlock_inode;
9748 		}
9749 
9750 		if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
9751 			ret = -EAGAIN;
9752 			goto out_unlock_inode;
9753 		}
9754 
9755 		ordered = btrfs_lookup_ordered_range(inode, start,
9756 						     lockend - start + 1);
9757 		if (ordered) {
9758 			btrfs_put_ordered_extent(ordered);
9759 			btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9760 			ret = -EAGAIN;
9761 			goto out_unlock_inode;
9762 		}
9763 	} else {
9764 		for (;;) {
9765 			struct btrfs_ordered_extent *ordered;
9766 
9767 			ret = btrfs_wait_ordered_range(inode, start,
9768 						       lockend - start + 1);
9769 			if (ret)
9770 				goto out_unlock_inode;
9771 
9772 			btrfs_lock_extent(io_tree, start, lockend, cached_state);
9773 			ordered = btrfs_lookup_ordered_range(inode, start,
9774 							     lockend - start + 1);
9775 			if (!ordered)
9776 				break;
9777 			btrfs_put_ordered_extent(ordered);
9778 			btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9779 			cond_resched();
9780 		}
9781 	}
9782 
9783 	em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
9784 	if (IS_ERR(em)) {
9785 		ret = PTR_ERR(em);
9786 		goto out_unlock_extent;
9787 	}
9788 
9789 	if (em->disk_bytenr == EXTENT_MAP_INLINE) {
9790 		u64 extent_start = em->start;
9791 
9792 		/*
9793 		 * For inline extents we get everything we need out of the
9794 		 * extent item.
9795 		 */
9796 		btrfs_free_extent_map(em);
9797 		em = NULL;
9798 		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
9799 						cached_state, extent_start,
9800 						count, encoded, &unlocked);
9801 		goto out_unlock_extent;
9802 	}
9803 
9804 	/*
9805 	 * We only want to return up to EOF even if the extent extends beyond
9806 	 * that.
9807 	 */
9808 	encoded->len = min_t(u64, btrfs_extent_map_end(em),
9809 			     inode->vfs_inode.i_size) - iocb->ki_pos;
9810 	if (em->disk_bytenr == EXTENT_MAP_HOLE ||
9811 	    (em->flags & EXTENT_FLAG_PREALLOC)) {
9812 		*disk_bytenr = EXTENT_MAP_HOLE;
9813 		count = min_t(u64, count, encoded->len);
9814 		encoded->len = count;
9815 		encoded->unencoded_len = count;
9816 	} else if (btrfs_extent_map_is_compressed(em)) {
9817 		*disk_bytenr = em->disk_bytenr;
9818 		/*
9819 		 * Bail if the buffer isn't large enough to return the whole
9820 		 * compressed extent.
9821 		 */
9822 		if (em->disk_num_bytes > count) {
9823 			ret = -ENOBUFS;
9824 			goto out_em;
9825 		}
9826 		*disk_io_size = em->disk_num_bytes;
9827 		count = em->disk_num_bytes;
9828 		encoded->unencoded_len = em->ram_bytes;
9829 		encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
9830 		ret = btrfs_encoded_io_compression_from_extent(fs_info,
9831 					       btrfs_extent_map_compression(em));
9832 		if (ret < 0)
9833 			goto out_em;
9834 		encoded->compression = ret;
9835 	} else {
9836 		*disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start);
9837 		if (encoded->len > count)
9838 			encoded->len = count;
9839 		/*
9840 		 * Don't read beyond what we locked. This also limits the page
9841 		 * allocations that we'll do.
9842 		 */
9843 		*disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
9844 		count = start + *disk_io_size - iocb->ki_pos;
9845 		encoded->len = count;
9846 		encoded->unencoded_len = count;
9847 		*disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
9848 	}
9849 	btrfs_free_extent_map(em);
9850 	em = NULL;
9851 
9852 	if (*disk_bytenr == EXTENT_MAP_HOLE) {
9853 		btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9854 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9855 		unlocked = true;
9856 		ret = iov_iter_zero(count, iter);
9857 		if (ret != count)
9858 			ret = -EFAULT;
9859 	} else {
9860 		ret = -EIOCBQUEUED;
9861 		goto out_unlock_extent;
9862 	}
9863 
9864 out_em:
9865 	btrfs_free_extent_map(em);
9866 out_unlock_extent:
9867 	/* Leave inode and extent locked if we need to do a read. */
9868 	if (!unlocked && ret != -EIOCBQUEUED)
9869 		btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9870 out_unlock_inode:
9871 	if (!unlocked && ret != -EIOCBQUEUED)
9872 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9873 	return ret;
9874 }
9875 
btrfs_do_encoded_write(struct kiocb * iocb,struct iov_iter * from,const struct btrfs_ioctl_encoded_io_args * encoded)9876 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
9877 			       const struct btrfs_ioctl_encoded_io_args *encoded)
9878 {
9879 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9880 	struct btrfs_root *root = inode->root;
9881 	struct btrfs_fs_info *fs_info = root->fs_info;
9882 	struct extent_io_tree *io_tree = &inode->io_tree;
9883 	struct extent_changeset *data_reserved = NULL;
9884 	struct extent_state *cached_state = NULL;
9885 	struct btrfs_ordered_extent *ordered;
9886 	struct btrfs_file_extent file_extent;
9887 	struct compressed_bio *cb = NULL;
9888 	int compression;
9889 	size_t orig_count;
9890 	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
9891 	const u32 blocksize = fs_info->sectorsize;
9892 	u64 start, end;
9893 	u64 num_bytes, ram_bytes, disk_num_bytes;
9894 	struct btrfs_key ins;
9895 	bool extent_reserved = false;
9896 	struct extent_map *em;
9897 	ssize_t ret;
9898 
9899 	switch (encoded->compression) {
9900 	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
9901 		compression = BTRFS_COMPRESS_ZLIB;
9902 		break;
9903 	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
9904 		compression = BTRFS_COMPRESS_ZSTD;
9905 		break;
9906 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
9907 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
9908 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
9909 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
9910 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
9911 		/* The sector size must match for LZO. */
9912 		if (encoded->compression -
9913 		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
9914 		    fs_info->sectorsize_bits)
9915 			return -EINVAL;
9916 		compression = BTRFS_COMPRESS_LZO;
9917 		break;
9918 	default:
9919 		return -EINVAL;
9920 	}
9921 	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
9922 		return -EINVAL;
9923 
9924 	/*
9925 	 * Compressed extents should always have checksums, so error out if we
9926 	 * have a NOCOW file or inode was created while mounted with NODATASUM.
9927 	 */
9928 	if (inode->flags & BTRFS_INODE_NODATASUM)
9929 		return -EINVAL;
9930 
9931 	orig_count = iov_iter_count(from);
9932 
9933 	/* The extent size must be sane. */
9934 	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
9935 	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
9936 		return -EINVAL;
9937 
9938 	/*
9939 	 * The compressed data must be smaller than the decompressed data.
9940 	 *
9941 	 * It's of course possible for data to compress to larger or the same
9942 	 * size, but the buffered I/O path falls back to no compression for such
9943 	 * data, and we don't want to break any assumptions by creating these
9944 	 * extents.
9945 	 *
9946 	 * Note that this is less strict than the current check we have that the
9947 	 * compressed data must be at least one sector smaller than the
9948 	 * decompressed data. We only want to enforce the weaker requirement
9949 	 * from old kernels that it is at least one byte smaller.
9950 	 */
9951 	if (orig_count >= encoded->unencoded_len)
9952 		return -EINVAL;
9953 
9954 	/* The extent must start on a sector boundary. */
9955 	start = iocb->ki_pos;
9956 	if (!IS_ALIGNED(start, fs_info->sectorsize))
9957 		return -EINVAL;
9958 
9959 	/*
9960 	 * The extent must end on a sector boundary. However, we allow a write
9961 	 * which ends at or extends i_size to have an unaligned length; we round
9962 	 * up the extent size and set i_size to the unaligned end.
9963 	 */
9964 	if (start + encoded->len < inode->vfs_inode.i_size &&
9965 	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
9966 		return -EINVAL;
9967 
9968 	/* Finally, the offset in the unencoded data must be sector-aligned. */
9969 	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
9970 		return -EINVAL;
9971 
9972 	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
9973 	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
9974 	end = start + num_bytes - 1;
9975 
9976 	/*
9977 	 * If the extent cannot be inline, the compressed data on disk must be
9978 	 * sector-aligned. For convenience, we extend it with zeroes if it
9979 	 * isn't.
9980 	 */
9981 	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
9982 
9983 	cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
9984 	for (int i = 0; i * min_folio_size < disk_num_bytes; i++) {
9985 		struct folio *folio;
9986 		size_t bytes = min(min_folio_size, iov_iter_count(from));
9987 		char *kaddr;
9988 
9989 		folio = btrfs_alloc_compr_folio(fs_info);
9990 		if (!folio) {
9991 			ret = -ENOMEM;
9992 			goto out_cb;
9993 		}
9994 		kaddr = kmap_local_folio(folio, 0);
9995 		ret = copy_from_iter(kaddr, bytes, from);
9996 		kunmap_local(kaddr);
9997 		if (ret != bytes) {
9998 			folio_put(folio);
9999 			ret = -EFAULT;
10000 			goto out_cb;
10001 		}
10002 		if (!IS_ALIGNED(bytes, blocksize))
10003 			folio_zero_range(folio, bytes, round_up(bytes, blocksize) - bytes);
10004 		ret = bio_add_folio(&cb->bbio.bio, folio, round_up(bytes, blocksize), 0);
10005 		if (unlikely(!ret)) {
10006 			folio_put(folio);
10007 			ret = -EINVAL;
10008 			goto out_cb;
10009 		}
10010 	}
10011 	ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
10012 
10013 	for (;;) {
10014 		ret = btrfs_wait_ordered_range(inode, start, num_bytes);
10015 		if (ret)
10016 			goto out_cb;
10017 		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10018 						    start >> PAGE_SHIFT,
10019 						    end >> PAGE_SHIFT);
10020 		if (ret)
10021 			goto out_cb;
10022 		btrfs_lock_extent(io_tree, start, end, &cached_state);
10023 		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10024 		if (!ordered &&
10025 		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10026 			break;
10027 		if (ordered)
10028 			btrfs_put_ordered_extent(ordered);
10029 		btrfs_unlock_extent(io_tree, start, end, &cached_state);
10030 		cond_resched();
10031 	}
10032 
10033 	/*
10034 	 * We don't use the higher-level delalloc space functions because our
10035 	 * num_bytes and disk_num_bytes are different.
10036 	 */
10037 	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10038 	if (ret)
10039 		goto out_unlock;
10040 	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10041 	if (ret)
10042 		goto out_free_data_space;
10043 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10044 					      false);
10045 	if (ret)
10046 		goto out_qgroup_free_data;
10047 
10048 	/* Try an inline extent first. */
10049 	if (encoded->unencoded_len == encoded->len &&
10050 	    encoded->unencoded_offset == 0 &&
10051 	    can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
10052 		ret = __cow_file_range_inline(inode, encoded->len,
10053 					      orig_count, compression,
10054 					      bio_first_folio_all(&cb->bbio.bio),
10055 					      true);
10056 		if (ret <= 0) {
10057 			if (ret == 0)
10058 				ret = orig_count;
10059 			goto out_delalloc_release;
10060 		}
10061 	}
10062 
10063 	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10064 				   disk_num_bytes, 0, 0, &ins, true, true);
10065 	if (ret)
10066 		goto out_delalloc_release;
10067 	extent_reserved = true;
10068 
10069 	file_extent.disk_bytenr = ins.objectid;
10070 	file_extent.disk_num_bytes = ins.offset;
10071 	file_extent.num_bytes = num_bytes;
10072 	file_extent.ram_bytes = ram_bytes;
10073 	file_extent.offset = encoded->unencoded_offset;
10074 	file_extent.compression = compression;
10075 	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
10076 	if (IS_ERR(em)) {
10077 		ret = PTR_ERR(em);
10078 		goto out_free_reserved;
10079 	}
10080 	btrfs_free_extent_map(em);
10081 
10082 	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
10083 				       (1U << BTRFS_ORDERED_ENCODED) |
10084 				       (1U << BTRFS_ORDERED_COMPRESSED));
10085 	if (IS_ERR(ordered)) {
10086 		btrfs_drop_extent_map_range(inode, start, end, false);
10087 		ret = PTR_ERR(ordered);
10088 		goto out_free_reserved;
10089 	}
10090 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10091 
10092 	if (start + encoded->len > inode->vfs_inode.i_size)
10093 		i_size_write(&inode->vfs_inode, start + encoded->len);
10094 
10095 	btrfs_unlock_extent(io_tree, start, end, &cached_state);
10096 
10097 	btrfs_delalloc_release_extents(inode, num_bytes);
10098 
10099 	btrfs_submit_compressed_write(ordered, cb);
10100 	ret = orig_count;
10101 	goto out;
10102 
10103 out_free_reserved:
10104 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10105 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
10106 out_delalloc_release:
10107 	btrfs_delalloc_release_extents(inode, num_bytes);
10108 	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
10109 out_qgroup_free_data:
10110 	if (ret < 0)
10111 		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
10112 out_free_data_space:
10113 	/*
10114 	 * If btrfs_reserve_extent() succeeded, then we already decremented
10115 	 * bytes_may_use.
10116 	 */
10117 	if (!extent_reserved)
10118 		btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
10119 out_unlock:
10120 	btrfs_unlock_extent(io_tree, start, end, &cached_state);
10121 out_cb:
10122 	if (cb)
10123 		cleanup_compressed_bio(cb);
10124 out:
10125 	if (ret >= 0)
10126 		iocb->ki_pos += encoded->len;
10127 	return ret;
10128 }
10129 
10130 #ifdef CONFIG_SWAP
10131 /*
10132  * Add an entry indicating a block group or device which is pinned by a
10133  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10134  * negative errno on failure.
10135  */
btrfs_add_swapfile_pin(struct inode * inode,void * ptr,bool is_block_group)10136 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10137 				  bool is_block_group)
10138 {
10139 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10140 	struct btrfs_swapfile_pin *sp, *entry;
10141 	struct rb_node **p;
10142 	struct rb_node *parent = NULL;
10143 
10144 	sp = kmalloc_obj(*sp, GFP_NOFS);
10145 	if (!sp)
10146 		return -ENOMEM;
10147 	sp->ptr = ptr;
10148 	sp->inode = inode;
10149 	sp->is_block_group = is_block_group;
10150 	sp->bg_extent_count = 1;
10151 
10152 	spin_lock(&fs_info->swapfile_pins_lock);
10153 	p = &fs_info->swapfile_pins.rb_node;
10154 	while (*p) {
10155 		parent = *p;
10156 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10157 		if (sp->ptr < entry->ptr ||
10158 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10159 			p = &(*p)->rb_left;
10160 		} else if (sp->ptr > entry->ptr ||
10161 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10162 			p = &(*p)->rb_right;
10163 		} else {
10164 			if (is_block_group)
10165 				entry->bg_extent_count++;
10166 			spin_unlock(&fs_info->swapfile_pins_lock);
10167 			kfree(sp);
10168 			return 1;
10169 		}
10170 	}
10171 	rb_link_node(&sp->node, parent, p);
10172 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10173 	spin_unlock(&fs_info->swapfile_pins_lock);
10174 	return 0;
10175 }
10176 
10177 /* Free all of the entries pinned by this swapfile. */
btrfs_free_swapfile_pins(struct inode * inode)10178 static void btrfs_free_swapfile_pins(struct inode *inode)
10179 {
10180 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10181 	struct btrfs_swapfile_pin *sp;
10182 	struct rb_node *node, *next;
10183 
10184 	spin_lock(&fs_info->swapfile_pins_lock);
10185 	node = rb_first(&fs_info->swapfile_pins);
10186 	while (node) {
10187 		next = rb_next(node);
10188 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10189 		if (sp->inode == inode) {
10190 			rb_erase(&sp->node, &fs_info->swapfile_pins);
10191 			if (sp->is_block_group) {
10192 				btrfs_dec_block_group_swap_extents(sp->ptr,
10193 							   sp->bg_extent_count);
10194 				btrfs_put_block_group(sp->ptr);
10195 			}
10196 			kfree(sp);
10197 		}
10198 		node = next;
10199 	}
10200 	spin_unlock(&fs_info->swapfile_pins_lock);
10201 }
10202 
10203 struct btrfs_swap_info {
10204 	u64 start;
10205 	u64 block_start;
10206 	u64 block_len;
10207 	u64 lowest_ppage;
10208 	u64 highest_ppage;
10209 	unsigned long nr_pages;
10210 	int nr_extents;
10211 };
10212 
btrfs_add_swap_extent(struct swap_info_struct * sis,struct btrfs_swap_info * bsi)10213 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10214 				 struct btrfs_swap_info *bsi)
10215 {
10216 	unsigned long nr_pages;
10217 	unsigned long max_pages;
10218 	u64 first_ppage, first_ppage_reported, next_ppage;
10219 	int ret;
10220 
10221 	/*
10222 	 * Our swapfile may have had its size extended after the swap header was
10223 	 * written. In that case activating the swapfile should not go beyond
10224 	 * the max size set in the swap header.
10225 	 */
10226 	if (bsi->nr_pages >= sis->max)
10227 		return 0;
10228 
10229 	max_pages = sis->max - bsi->nr_pages;
10230 	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
10231 	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
10232 
10233 	if (first_ppage >= next_ppage)
10234 		return 0;
10235 	nr_pages = next_ppage - first_ppage;
10236 	nr_pages = min(nr_pages, max_pages);
10237 
10238 	first_ppage_reported = first_ppage;
10239 	if (bsi->start == 0)
10240 		first_ppage_reported++;
10241 	if (bsi->lowest_ppage > first_ppage_reported)
10242 		bsi->lowest_ppage = first_ppage_reported;
10243 	if (bsi->highest_ppage < (next_ppage - 1))
10244 		bsi->highest_ppage = next_ppage - 1;
10245 
10246 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10247 	if (ret < 0)
10248 		return ret;
10249 	bsi->nr_extents += ret;
10250 	bsi->nr_pages += nr_pages;
10251 	return 0;
10252 }
10253 
btrfs_swap_deactivate(struct file * file)10254 static void btrfs_swap_deactivate(struct file *file)
10255 {
10256 	struct inode *inode = file_inode(file);
10257 
10258 	btrfs_free_swapfile_pins(inode);
10259 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10260 }
10261 
btrfs_swap_activate(struct swap_info_struct * sis,struct file * file,sector_t * span)10262 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10263 			       sector_t *span)
10264 {
10265 	struct inode *inode = file_inode(file);
10266 	struct btrfs_root *root = BTRFS_I(inode)->root;
10267 	struct btrfs_fs_info *fs_info = root->fs_info;
10268 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10269 	struct extent_state *cached_state = NULL;
10270 	struct btrfs_chunk_map *map = NULL;
10271 	struct btrfs_device *device = NULL;
10272 	struct btrfs_swap_info bsi = {
10273 		.lowest_ppage = (sector_t)-1ULL,
10274 	};
10275 	struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
10276 	struct btrfs_path *path = NULL;
10277 	int ret = 0;
10278 	u64 isize;
10279 	u64 prev_extent_end = 0;
10280 
10281 	/*
10282 	 * Acquire the inode's mmap lock to prevent races with memory mapped
10283 	 * writes, as they could happen after we flush delalloc below and before
10284 	 * we lock the extent range further below. The inode was already locked
10285 	 * up in the call chain.
10286 	 */
10287 	btrfs_assert_inode_locked(BTRFS_I(inode));
10288 	down_write(&BTRFS_I(inode)->i_mmap_lock);
10289 
10290 	/*
10291 	 * If the swap file was just created, make sure delalloc is done. If the
10292 	 * file changes again after this, the user is doing something stupid and
10293 	 * we don't really care.
10294 	 */
10295 	ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
10296 	if (ret)
10297 		goto out_unlock_mmap;
10298 
10299 	/*
10300 	 * The inode is locked, so these flags won't change after we check them.
10301 	 */
10302 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10303 		btrfs_warn(fs_info, "swapfile must not be compressed");
10304 		ret = -EINVAL;
10305 		goto out_unlock_mmap;
10306 	}
10307 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10308 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
10309 		ret = -EINVAL;
10310 		goto out_unlock_mmap;
10311 	}
10312 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
10313 		btrfs_warn(fs_info, "swapfile must not be checksummed");
10314 		ret = -EINVAL;
10315 		goto out_unlock_mmap;
10316 	}
10317 
10318 	path = btrfs_alloc_path();
10319 	backref_ctx = btrfs_alloc_backref_share_check_ctx();
10320 	if (!path || !backref_ctx) {
10321 		ret = -ENOMEM;
10322 		goto out_unlock_mmap;
10323 	}
10324 
10325 	/*
10326 	 * Balance or device remove/replace/resize can move stuff around from
10327 	 * under us. The exclop protection makes sure they aren't running/won't
10328 	 * run concurrently while we are mapping the swap extents, and
10329 	 * fs_info->swapfile_pins prevents them from running while the swap
10330 	 * file is active and moving the extents. Note that this also prevents
10331 	 * a concurrent device add which isn't actually necessary, but it's not
10332 	 * really worth the trouble to allow it.
10333 	 */
10334 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
10335 		btrfs_warn(fs_info,
10336 	   "cannot activate swapfile while exclusive operation is running");
10337 		ret = -EBUSY;
10338 		goto out_unlock_mmap;
10339 	}
10340 
10341 	/*
10342 	 * Prevent snapshot creation while we are activating the swap file.
10343 	 * We do not want to race with snapshot creation. If snapshot creation
10344 	 * already started before we bumped nr_swapfiles from 0 to 1 and
10345 	 * completes before the first write into the swap file after it is
10346 	 * activated, than that write would fallback to COW.
10347 	 */
10348 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
10349 		btrfs_exclop_finish(fs_info);
10350 		btrfs_warn(fs_info,
10351 	   "cannot activate swapfile because snapshot creation is in progress");
10352 		ret = -EINVAL;
10353 		goto out_unlock_mmap;
10354 	}
10355 	/*
10356 	 * Snapshots can create extents which require COW even if NODATACOW is
10357 	 * set. We use this counter to prevent snapshots. We must increment it
10358 	 * before walking the extents because we don't want a concurrent
10359 	 * snapshot to run after we've already checked the extents.
10360 	 *
10361 	 * It is possible that subvolume is marked for deletion but still not
10362 	 * removed yet. To prevent this race, we check the root status before
10363 	 * activating the swapfile.
10364 	 */
10365 	spin_lock(&root->root_item_lock);
10366 	if (btrfs_root_dead(root)) {
10367 		spin_unlock(&root->root_item_lock);
10368 
10369 		btrfs_drew_write_unlock(&root->snapshot_lock);
10370 		btrfs_exclop_finish(fs_info);
10371 		btrfs_warn(fs_info,
10372 		"cannot activate swapfile because subvolume %llu is being deleted",
10373 			btrfs_root_id(root));
10374 		ret = -EPERM;
10375 		goto out_unlock_mmap;
10376 	}
10377 	atomic_inc(&root->nr_swapfiles);
10378 	spin_unlock(&root->root_item_lock);
10379 
10380 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10381 
10382 	btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
10383 	while (prev_extent_end < isize) {
10384 		struct btrfs_key key;
10385 		struct extent_buffer *leaf;
10386 		struct btrfs_file_extent_item *ei;
10387 		struct btrfs_block_group *bg;
10388 		u64 logical_block_start;
10389 		u64 physical_block_start;
10390 		u64 extent_gen;
10391 		u64 disk_bytenr;
10392 		u64 len;
10393 
10394 		key.objectid = btrfs_ino(BTRFS_I(inode));
10395 		key.type = BTRFS_EXTENT_DATA_KEY;
10396 		key.offset = prev_extent_end;
10397 
10398 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
10399 		if (ret < 0)
10400 			goto out;
10401 
10402 		/*
10403 		 * If key not found it means we have an implicit hole (NO_HOLES
10404 		 * is enabled).
10405 		 */
10406 		if (ret > 0) {
10407 			btrfs_warn(fs_info, "swapfile must not have holes");
10408 			ret = -EINVAL;
10409 			goto out;
10410 		}
10411 
10412 		leaf = path->nodes[0];
10413 		ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
10414 
10415 		if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
10416 			/*
10417 			 * It's unlikely we'll ever actually find ourselves
10418 			 * here, as a file small enough to fit inline won't be
10419 			 * big enough to store more than the swap header, but in
10420 			 * case something changes in the future, let's catch it
10421 			 * here rather than later.
10422 			 */
10423 			btrfs_warn(fs_info, "swapfile must not be inline");
10424 			ret = -EINVAL;
10425 			goto out;
10426 		}
10427 
10428 		if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
10429 			btrfs_warn(fs_info, "swapfile must not be compressed");
10430 			ret = -EINVAL;
10431 			goto out;
10432 		}
10433 
10434 		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
10435 		if (disk_bytenr == 0) {
10436 			btrfs_warn(fs_info, "swapfile must not have holes");
10437 			ret = -EINVAL;
10438 			goto out;
10439 		}
10440 
10441 		logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
10442 		extent_gen = btrfs_file_extent_generation(leaf, ei);
10443 		prev_extent_end = btrfs_file_extent_end(path);
10444 
10445 		if (prev_extent_end > isize)
10446 			len = isize - key.offset;
10447 		else
10448 			len = btrfs_file_extent_num_bytes(leaf, ei);
10449 
10450 		backref_ctx->curr_leaf_bytenr = leaf->start;
10451 
10452 		/*
10453 		 * Don't need the path anymore, release to avoid deadlocks when
10454 		 * calling btrfs_is_data_extent_shared() because when joining a
10455 		 * transaction it can block waiting for the current one's commit
10456 		 * which in turn may be trying to lock the same leaf to flush
10457 		 * delayed items for example.
10458 		 */
10459 		btrfs_release_path(path);
10460 
10461 		ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
10462 						  extent_gen, backref_ctx);
10463 		if (ret < 0) {
10464 			goto out;
10465 		} else if (ret > 0) {
10466 			btrfs_warn(fs_info,
10467 				   "swapfile must not be copy-on-write");
10468 			ret = -EINVAL;
10469 			goto out;
10470 		}
10471 
10472 		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10473 		if (IS_ERR(map)) {
10474 			ret = PTR_ERR(map);
10475 			goto out;
10476 		}
10477 
10478 		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10479 			btrfs_warn(fs_info,
10480 				   "swapfile must have single data profile");
10481 			ret = -EINVAL;
10482 			goto out;
10483 		}
10484 
10485 		if (device == NULL) {
10486 			device = map->stripes[0].dev;
10487 			ret = btrfs_add_swapfile_pin(inode, device, false);
10488 			if (ret == 1)
10489 				ret = 0;
10490 			else if (ret)
10491 				goto out;
10492 		} else if (device != map->stripes[0].dev) {
10493 			btrfs_warn(fs_info, "swapfile must be on one device");
10494 			ret = -EINVAL;
10495 			goto out;
10496 		}
10497 
10498 		physical_block_start = (map->stripes[0].physical +
10499 					(logical_block_start - map->start));
10500 		btrfs_free_chunk_map(map);
10501 		map = NULL;
10502 
10503 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10504 		if (!bg) {
10505 			btrfs_warn(fs_info,
10506 			   "could not find block group containing swapfile");
10507 			ret = -EINVAL;
10508 			goto out;
10509 		}
10510 
10511 		if (!btrfs_inc_block_group_swap_extents(bg)) {
10512 			btrfs_warn(fs_info,
10513 			   "block group for swapfile at %llu is read-only%s",
10514 			   bg->start,
10515 			   atomic_read(&fs_info->scrubs_running) ?
10516 				       " (scrub running)" : "");
10517 			btrfs_put_block_group(bg);
10518 			ret = -EINVAL;
10519 			goto out;
10520 		}
10521 
10522 		ret = btrfs_add_swapfile_pin(inode, bg, true);
10523 		if (ret) {
10524 			btrfs_put_block_group(bg);
10525 			if (ret == 1)
10526 				ret = 0;
10527 			else
10528 				goto out;
10529 		}
10530 
10531 		if (bsi.block_len &&
10532 		    bsi.block_start + bsi.block_len == physical_block_start) {
10533 			bsi.block_len += len;
10534 		} else {
10535 			if (bsi.block_len) {
10536 				ret = btrfs_add_swap_extent(sis, &bsi);
10537 				if (ret)
10538 					goto out;
10539 			}
10540 			bsi.start = key.offset;
10541 			bsi.block_start = physical_block_start;
10542 			bsi.block_len = len;
10543 		}
10544 
10545 		if (fatal_signal_pending(current)) {
10546 			ret = -EINTR;
10547 			goto out;
10548 		}
10549 
10550 		cond_resched();
10551 	}
10552 
10553 	if (bsi.block_len)
10554 		ret = btrfs_add_swap_extent(sis, &bsi);
10555 
10556 out:
10557 	if (!IS_ERR_OR_NULL(map))
10558 		btrfs_free_chunk_map(map);
10559 
10560 	btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);
10561 
10562 	if (ret)
10563 		btrfs_swap_deactivate(file);
10564 
10565 	btrfs_drew_write_unlock(&root->snapshot_lock);
10566 
10567 	btrfs_exclop_finish(fs_info);
10568 
10569 out_unlock_mmap:
10570 	up_write(&BTRFS_I(inode)->i_mmap_lock);
10571 	btrfs_free_backref_share_ctx(backref_ctx);
10572 	btrfs_free_path(path);
10573 	if (ret)
10574 		return ret;
10575 
10576 	if (device)
10577 		sis->bdev = device->bdev;
10578 	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
10579 	sis->max = bsi.nr_pages;
10580 	sis->pages = bsi.nr_pages - 1;
10581 	return bsi.nr_extents;
10582 }
10583 #else
btrfs_swap_deactivate(struct file * file)10584 static void btrfs_swap_deactivate(struct file *file)
10585 {
10586 }
10587 
btrfs_swap_activate(struct swap_info_struct * sis,struct file * file,sector_t * span)10588 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10589 			       sector_t *span)
10590 {
10591 	return -EOPNOTSUPP;
10592 }
10593 #endif
10594 
10595 /*
10596  * Update the number of bytes used in the VFS' inode. When we replace extents in
10597  * a range (clone, dedupe, fallocate's zero range), we must update the number of
10598  * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
10599  * always get a correct value.
10600  */
btrfs_update_inode_bytes(struct btrfs_inode * inode,const u64 add_bytes,const u64 del_bytes)10601 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
10602 			      const u64 add_bytes,
10603 			      const u64 del_bytes)
10604 {
10605 	if (add_bytes == del_bytes)
10606 		return;
10607 
10608 	spin_lock(&inode->lock);
10609 	if (del_bytes > 0)
10610 		inode_sub_bytes(&inode->vfs_inode, del_bytes);
10611 	if (add_bytes > 0)
10612 		inode_add_bytes(&inode->vfs_inode, add_bytes);
10613 	spin_unlock(&inode->lock);
10614 }
10615 
10616 /*
10617  * Verify that there are no ordered extents for a given file range.
10618  *
10619  * @inode:   The target inode.
10620  * @start:   Start offset of the file range, should be sector size aligned.
10621  * @end:     End offset (inclusive) of the file range, its value +1 should be
10622  *           sector size aligned.
10623  *
10624  * This should typically be used for cases where we locked an inode's VFS lock in
10625  * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
10626  * we have flushed all delalloc in the range, we have waited for all ordered
10627  * extents in the range to complete and finally we have locked the file range in
10628  * the inode's io_tree.
10629  */
btrfs_assert_inode_range_clean(struct btrfs_inode * inode,u64 start,u64 end)10630 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
10631 {
10632 	struct btrfs_root *root = inode->root;
10633 	struct btrfs_ordered_extent *ordered;
10634 
10635 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
10636 		return;
10637 
10638 	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
10639 	if (ordered) {
10640 		btrfs_err(root->fs_info,
10641 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
10642 			  start, end, btrfs_ino(inode), btrfs_root_id(root),
10643 			  ordered->file_offset,
10644 			  ordered->file_offset + ordered->num_bytes - 1);
10645 		btrfs_put_ordered_extent(ordered);
10646 	}
10647 
10648 	ASSERT(ordered == NULL);
10649 }
10650 
10651 /*
10652  * Find the first inode with a minimum number.
10653  *
10654  * @root:	The root to search for.
10655  * @min_ino:	The minimum inode number.
10656  *
10657  * Find the first inode in the @root with a number >= @min_ino and return it.
10658  * Returns NULL if no such inode found.
10659  */
btrfs_find_first_inode(struct btrfs_root * root,u64 min_ino)10660 struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
10661 {
10662 	struct btrfs_inode *inode;
10663 	unsigned long from = min_ino;
10664 
10665 	xa_lock(&root->inodes);
10666 	while (true) {
10667 		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
10668 		if (!inode)
10669 			break;
10670 		if (igrab(&inode->vfs_inode))
10671 			break;
10672 
10673 		from = btrfs_ino(inode) + 1;
10674 		cond_resched_lock(&root->inodes.xa_lock);
10675 	}
10676 	xa_unlock(&root->inodes);
10677 
10678 	return inode;
10679 }
10680 
10681 static const struct inode_operations btrfs_dir_inode_operations = {
10682 	.getattr	= btrfs_getattr,
10683 	.lookup		= btrfs_lookup,
10684 	.create		= btrfs_create,
10685 	.unlink		= btrfs_unlink,
10686 	.link		= btrfs_link,
10687 	.mkdir		= btrfs_mkdir,
10688 	.rmdir		= btrfs_rmdir,
10689 	.rename		= btrfs_rename2,
10690 	.symlink	= btrfs_symlink,
10691 	.setattr	= btrfs_setattr,
10692 	.mknod		= btrfs_mknod,
10693 	.listxattr	= btrfs_listxattr,
10694 	.permission	= btrfs_permission,
10695 	.get_inode_acl	= btrfs_get_acl,
10696 	.set_acl	= btrfs_set_acl,
10697 	.update_time	= btrfs_update_time,
10698 	.tmpfile        = btrfs_tmpfile,
10699 	.fileattr_get	= btrfs_fileattr_get,
10700 	.fileattr_set	= btrfs_fileattr_set,
10701 };
10702 
10703 static const struct file_operations btrfs_dir_file_operations = {
10704 	.llseek		= btrfs_dir_llseek,
10705 	.read		= generic_read_dir,
10706 	.iterate_shared	= btrfs_real_readdir,
10707 	.open		= btrfs_opendir,
10708 	.unlocked_ioctl	= btrfs_ioctl,
10709 #ifdef CONFIG_COMPAT
10710 	.compat_ioctl	= btrfs_compat_ioctl,
10711 #endif
10712 	.release        = btrfs_release_file,
10713 	.fsync		= btrfs_sync_file,
10714 	.setlease	= generic_setlease,
10715 };
10716 
10717 /*
10718  * btrfs doesn't support the bmap operation because swapfiles
10719  * use bmap to make a mapping of extents in the file.  They assume
10720  * these extents won't change over the life of the file and they
10721  * use the bmap result to do IO directly to the drive.
10722  *
10723  * the btrfs bmap call would return logical addresses that aren't
10724  * suitable for IO and they also will change frequently as COW
10725  * operations happen.  So, swapfile + btrfs == corruption.
10726  *
10727  * For now we're avoiding this by dropping bmap.
10728  */
10729 static const struct address_space_operations btrfs_aops = {
10730 	.read_folio	= btrfs_read_folio,
10731 	.writepages	= btrfs_writepages,
10732 	.readahead	= btrfs_readahead,
10733 	.invalidate_folio = btrfs_invalidate_folio,
10734 	.launder_folio	= btrfs_launder_folio,
10735 	.release_folio	= btrfs_release_folio,
10736 	.migrate_folio	= btrfs_migrate_folio,
10737 	.dirty_folio	= filemap_dirty_folio,
10738 	.error_remove_folio = generic_error_remove_folio,
10739 	.swap_activate	= btrfs_swap_activate,
10740 	.swap_deactivate = btrfs_swap_deactivate,
10741 };
10742 
10743 static const struct inode_operations btrfs_file_inode_operations = {
10744 	.getattr	= btrfs_getattr,
10745 	.setattr	= btrfs_setattr,
10746 	.listxattr      = btrfs_listxattr,
10747 	.permission	= btrfs_permission,
10748 	.fiemap		= btrfs_fiemap,
10749 	.get_inode_acl	= btrfs_get_acl,
10750 	.set_acl	= btrfs_set_acl,
10751 	.update_time	= btrfs_update_time,
10752 	.fileattr_get	= btrfs_fileattr_get,
10753 	.fileattr_set	= btrfs_fileattr_set,
10754 };
10755 static const struct inode_operations btrfs_special_inode_operations = {
10756 	.getattr	= btrfs_getattr,
10757 	.setattr	= btrfs_setattr,
10758 	.permission	= btrfs_permission,
10759 	.listxattr	= btrfs_listxattr,
10760 	.get_inode_acl	= btrfs_get_acl,
10761 	.set_acl	= btrfs_set_acl,
10762 	.update_time	= btrfs_update_time,
10763 };
10764 static const struct inode_operations btrfs_symlink_inode_operations = {
10765 	.get_link	= page_get_link,
10766 	.getattr	= btrfs_getattr,
10767 	.setattr	= btrfs_setattr,
10768 	.permission	= btrfs_permission,
10769 	.listxattr	= btrfs_listxattr,
10770 	.update_time	= btrfs_update_time,
10771 };
10772 
10773 const struct dentry_operations btrfs_dentry_operations = {
10774 	.d_delete	= btrfs_dentry_delete,
10775 };
10776