xref: /linux/fs/btrfs/inode.c (revision c44db6c820140ffbc0e293a34c6a6de4b363422b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bio.h>
8 #include <linux/blk-cgroup.h>
9 #include <linux/file.h>
10 #include <linux/filelock.h>
11 #include <linux/fs.h>
12 #include <linux/fs_struct.h>
13 #include <linux/pagemap.h>
14 #include <linux/highmem.h>
15 #include <linux/time.h>
16 #include <linux/init.h>
17 #include <linux/string.h>
18 #include <linux/backing-dev.h>
19 #include <linux/writeback.h>
20 #include <linux/compat.h>
21 #include <linux/xattr.h>
22 #include <linux/posix_acl.h>
23 #include <linux/falloc.h>
24 #include <linux/slab.h>
25 #include <linux/ratelimit.h>
26 #include <linux/btrfs.h>
27 #include <linux/blkdev.h>
28 #include <linux/posix_acl_xattr.h>
29 #include <linux/uio.h>
30 #include <linux/magic.h>
31 #include <linux/iversion.h>
32 #include <linux/swap.h>
33 #include <linux/migrate.h>
34 #include <linux/sched/mm.h>
35 #include <linux/iomap.h>
36 #include <linux/unaligned.h>
37 #include "misc.h"
38 #include "ctree.h"
39 #include "disk-io.h"
40 #include "transaction.h"
41 #include "btrfs_inode.h"
42 #include "ordered-data.h"
43 #include "xattr.h"
44 #include "tree-log.h"
45 #include "bio.h"
46 #include "compression.h"
47 #include "locking.h"
48 #include "props.h"
49 #include "qgroup.h"
50 #include "delalloc-space.h"
51 #include "block-group.h"
52 #include "space-info.h"
53 #include "zoned.h"
54 #include "subpage.h"
55 #include "inode-item.h"
56 #include "fs.h"
57 #include "accessors.h"
58 #include "extent-tree.h"
59 #include "root-tree.h"
60 #include "defrag.h"
61 #include "dir-item.h"
62 #include "file-item.h"
63 #include "uuid-tree.h"
64 #include "ioctl.h"
65 #include "file.h"
66 #include "acl.h"
67 #include "relocation.h"
68 #include "verity.h"
69 #include "super.h"
70 #include "orphan.h"
71 #include "backref.h"
72 #include "raid-stripe-tree.h"
73 #include "fiemap.h"
74 #include "delayed-inode.h"
75 
76 #define COW_FILE_RANGE_KEEP_LOCKED	(1UL << 0)
77 #define COW_FILE_RANGE_NO_INLINE	(1UL << 1)
78 
79 struct btrfs_iget_args {
80 	u64 ino;
81 	struct btrfs_root *root;
82 };
83 
84 struct btrfs_rename_ctx {
85 	/* Output field. Stores the index number of the old directory entry. */
86 	u64 index;
87 };
88 
89 /*
90  * Used by data_reloc_print_warning_inode() to pass needed info for filename
91  * resolution and output of error message.
92  */
93 struct data_reloc_warn {
94 	struct btrfs_path path;
95 	struct btrfs_fs_info *fs_info;
96 	u64 extent_item_size;
97 	u64 logical;
98 	int mirror_num;
99 };
100 
101 /*
102  * For the file_extent_tree, we want to hold the inode lock when we lookup and
103  * update the disk_i_size, but lockdep will complain because our io_tree we hold
104  * the tree lock and get the inode lock when setting delalloc. These two things
105  * are unrelated, so make a class for the file_extent_tree so we don't get the
106  * two locking patterns mixed up.
107  */
108 static struct lock_class_key file_extent_tree_class;
109 
110 static const struct inode_operations btrfs_dir_inode_operations;
111 static const struct inode_operations btrfs_symlink_inode_operations;
112 static const struct inode_operations btrfs_special_inode_operations;
113 static const struct inode_operations btrfs_file_inode_operations;
114 static const struct address_space_operations btrfs_aops;
115 static const struct file_operations btrfs_dir_file_operations;
116 
117 static struct kmem_cache *btrfs_inode_cachep;
118 
119 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
120 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
121 
122 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
123 				     struct folio *locked_folio, u64 start,
124 				     u64 end, struct writeback_control *wbc,
125 				     bool pages_dirty);
126 
data_reloc_print_warning_inode(u64 inum,u64 offset,u64 num_bytes,u64 root,void * warn_ctx)127 static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
128 					  u64 root, void *warn_ctx)
129 {
130 	struct data_reloc_warn *warn = warn_ctx;
131 	struct btrfs_fs_info *fs_info = warn->fs_info;
132 	struct extent_buffer *eb;
133 	struct btrfs_inode_item *inode_item;
134 	struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
135 	struct btrfs_root *local_root;
136 	struct btrfs_key key;
137 	unsigned int nofs_flag;
138 	u32 nlink;
139 	int ret;
140 
141 	local_root = btrfs_get_fs_root(fs_info, root, true);
142 	if (IS_ERR(local_root)) {
143 		ret = PTR_ERR(local_root);
144 		goto err;
145 	}
146 
147 	/* This makes the path point to (inum INODE_ITEM ioff). */
148 	key.objectid = inum;
149 	key.type = BTRFS_INODE_ITEM_KEY;
150 	key.offset = 0;
151 
152 	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
153 	if (ret) {
154 		btrfs_put_root(local_root);
155 		btrfs_release_path(&warn->path);
156 		goto err;
157 	}
158 
159 	eb = warn->path.nodes[0];
160 	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
161 	nlink = btrfs_inode_nlink(eb, inode_item);
162 	btrfs_release_path(&warn->path);
163 
164 	nofs_flag = memalloc_nofs_save();
165 	ipath = init_ipath(4096, local_root, &warn->path);
166 	memalloc_nofs_restore(nofs_flag);
167 	if (IS_ERR(ipath)) {
168 		btrfs_put_root(local_root);
169 		ret = PTR_ERR(ipath);
170 		ipath = NULL;
171 		/*
172 		 * -ENOMEM, not a critical error, just output an generic error
173 		 * without filename.
174 		 */
175 		btrfs_warn(fs_info,
176 "checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
177 			   warn->logical, warn->mirror_num, root, inum, offset);
178 		return ret;
179 	}
180 	ret = paths_from_inode(inum, ipath);
181 	if (ret < 0) {
182 		btrfs_put_root(local_root);
183 		goto err;
184 	}
185 
186 	/*
187 	 * We deliberately ignore the bit ipath might have been too small to
188 	 * hold all of the paths here
189 	 */
190 	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
191 		btrfs_warn(fs_info,
192 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
193 			   warn->logical, warn->mirror_num, root, inum, offset,
194 			   fs_info->sectorsize, nlink,
195 			   (char *)(unsigned long)ipath->fspath->val[i]);
196 	}
197 
198 	btrfs_put_root(local_root);
199 	return 0;
200 
201 err:
202 	btrfs_warn(fs_info,
203 "checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
204 		   warn->logical, warn->mirror_num, root, inum, offset, ret);
205 
206 	return ret;
207 }
208 
209 /*
210  * Do extra user-friendly error output (e.g. lookup all the affected files).
211  *
212  * Return true if we succeeded doing the backref lookup.
213  * Return false if such lookup failed, and has to fallback to the old error message.
214  */
print_data_reloc_error(const struct btrfs_inode * inode,u64 file_off,const u8 * csum,const u8 * csum_expected,int mirror_num)215 static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
216 				   const u8 *csum, const u8 *csum_expected,
217 				   int mirror_num)
218 {
219 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
220 	BTRFS_PATH_AUTO_RELEASE(path);
221 	struct btrfs_key found_key = { 0 };
222 	struct extent_buffer *eb;
223 	struct btrfs_extent_item *ei;
224 	const u32 csum_size = fs_info->csum_size;
225 	u64 logical;
226 	u64 flags;
227 	u32 item_size;
228 	int ret;
229 
230 	mutex_lock(&fs_info->reloc_mutex);
231 	logical = btrfs_get_reloc_bg_bytenr(fs_info);
232 	mutex_unlock(&fs_info->reloc_mutex);
233 
234 	if (logical == U64_MAX) {
235 		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
236 		btrfs_warn_rl(fs_info,
237 "csum failed root %lld ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
238 			btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
239 			BTRFS_CSUM_FMT_VALUE(csum_size, csum),
240 			BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
241 			mirror_num);
242 		return;
243 	}
244 
245 	logical += file_off;
246 	btrfs_warn_rl(fs_info,
247 "csum failed root %lld ino %llu off %llu logical %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
248 			btrfs_root_id(inode->root),
249 			btrfs_ino(inode), file_off, logical,
250 			BTRFS_CSUM_FMT_VALUE(csum_size, csum),
251 			BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
252 			mirror_num);
253 
254 	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
255 	if (ret < 0) {
256 		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
257 			     logical, ret);
258 		return;
259 	}
260 	eb = path.nodes[0];
261 	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
262 	item_size = btrfs_item_size(eb, path.slots[0]);
263 	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
264 		unsigned long ptr = 0;
265 		u64 ref_root;
266 		u8 ref_level;
267 
268 		while (true) {
269 			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
270 						      item_size, &ref_root,
271 						      &ref_level);
272 			if (ret < 0) {
273 				btrfs_warn_rl(fs_info,
274 				"failed to resolve tree backref for logical %llu: %d",
275 					      logical, ret);
276 				break;
277 			}
278 			if (ret > 0)
279 				break;
280 
281 			btrfs_warn_rl(fs_info,
282 "csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
283 				logical, mirror_num,
284 				(ref_level ? "node" : "leaf"),
285 				ref_level, ref_root);
286 		}
287 	} else {
288 		struct btrfs_backref_walk_ctx ctx = { 0 };
289 		struct data_reloc_warn reloc_warn = { 0 };
290 
291 		/*
292 		 * Do not hold the path as later iterate_extent_inodes() call
293 		 * can be time consuming.
294 		 */
295 		btrfs_release_path(&path);
296 
297 		ctx.bytenr = found_key.objectid;
298 		ctx.extent_item_pos = logical - found_key.objectid;
299 		ctx.fs_info = fs_info;
300 
301 		reloc_warn.logical = logical;
302 		reloc_warn.extent_item_size = found_key.offset;
303 		reloc_warn.mirror_num = mirror_num;
304 		reloc_warn.fs_info = fs_info;
305 
306 		iterate_extent_inodes(&ctx, true,
307 				      data_reloc_print_warning_inode, &reloc_warn);
308 	}
309 }
310 
btrfs_print_data_csum_error(struct btrfs_inode * inode,u64 logical_start,u8 * csum,u8 * csum_expected,int mirror_num)311 static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
312 		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
313 {
314 	struct btrfs_root *root = inode->root;
315 	const u32 csum_size = root->fs_info->csum_size;
316 
317 	/* For data reloc tree, it's better to do a backref lookup instead. */
318 	if (btrfs_is_data_reloc_root(root))
319 		return print_data_reloc_error(inode, logical_start, csum,
320 					      csum_expected, mirror_num);
321 
322 	/* Output without objectid, which is more meaningful */
323 	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
324 		btrfs_warn_rl(root->fs_info,
325 "csum failed root %lld ino %lld off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
326 			btrfs_root_id(root), btrfs_ino(inode),
327 			logical_start,
328 			BTRFS_CSUM_FMT_VALUE(csum_size, csum),
329 			BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
330 			mirror_num);
331 	} else {
332 		btrfs_warn_rl(root->fs_info,
333 "csum failed root %llu ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
334 			btrfs_root_id(root), btrfs_ino(inode),
335 			logical_start,
336 			BTRFS_CSUM_FMT_VALUE(csum_size, csum),
337 			BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
338 			mirror_num);
339 	}
340 }
341 
342 /*
343  * Lock inode i_rwsem based on arguments passed.
344  *
345  * ilock_flags can have the following bit set:
346  *
347  * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
348  * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
349  *		     return -EAGAIN
350  * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
351  */
btrfs_inode_lock(struct btrfs_inode * inode,unsigned int ilock_flags)352 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
353 {
354 	if (ilock_flags & BTRFS_ILOCK_SHARED) {
355 		if (ilock_flags & BTRFS_ILOCK_TRY) {
356 			if (!inode_trylock_shared(&inode->vfs_inode))
357 				return -EAGAIN;
358 			else
359 				return 0;
360 		}
361 		inode_lock_shared(&inode->vfs_inode);
362 	} else {
363 		if (ilock_flags & BTRFS_ILOCK_TRY) {
364 			if (!inode_trylock(&inode->vfs_inode))
365 				return -EAGAIN;
366 			else
367 				return 0;
368 		}
369 		inode_lock(&inode->vfs_inode);
370 	}
371 	if (ilock_flags & BTRFS_ILOCK_MMAP)
372 		down_write(&inode->i_mmap_lock);
373 	return 0;
374 }
375 
376 /*
377  * Unlock inode i_rwsem.
378  *
379  * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
380  * to decide whether the lock acquired is shared or exclusive.
381  */
btrfs_inode_unlock(struct btrfs_inode * inode,unsigned int ilock_flags)382 void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
383 {
384 	if (ilock_flags & BTRFS_ILOCK_MMAP)
385 		up_write(&inode->i_mmap_lock);
386 	if (ilock_flags & BTRFS_ILOCK_SHARED)
387 		inode_unlock_shared(&inode->vfs_inode);
388 	else
389 		inode_unlock(&inode->vfs_inode);
390 }
391 
392 /*
393  * Cleanup all submitted ordered extents in specified range to handle errors
394  * from the btrfs_run_delalloc_range() callback.
395  *
396  * NOTE: caller must ensure that when an error happens, it can not call
397  * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
398  * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
399  * to be released, which we want to happen only when finishing the ordered
400  * extent (btrfs_finish_ordered_io()).
401  */
btrfs_cleanup_ordered_extents(struct btrfs_inode * inode,u64 offset,u64 bytes)402 static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
403 						 u64 offset, u64 bytes)
404 {
405 	pgoff_t index = offset >> PAGE_SHIFT;
406 	const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT;
407 	struct folio *folio;
408 
409 	while (index <= end_index) {
410 		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
411 		if (IS_ERR(folio)) {
412 			index++;
413 			continue;
414 		}
415 
416 		index = folio_next_index(folio);
417 		/*
418 		 * Here we just clear all Ordered bits for every page in the
419 		 * range, then btrfs_mark_ordered_io_finished() will handle
420 		 * the ordered extent accounting for the range.
421 		 */
422 		btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
423 						offset, bytes);
424 		folio_put(folio);
425 	}
426 
427 	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
428 }
429 
430 static int btrfs_dirty_inode(struct btrfs_inode *inode);
431 
btrfs_init_inode_security(struct btrfs_trans_handle * trans,struct btrfs_new_inode_args * args)432 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
433 				     struct btrfs_new_inode_args *args)
434 {
435 	int ret;
436 
437 	if (args->default_acl) {
438 		ret = __btrfs_set_acl(trans, args->inode, args->default_acl,
439 				      ACL_TYPE_DEFAULT);
440 		if (ret)
441 			return ret;
442 	}
443 	if (args->acl) {
444 		ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
445 		if (ret)
446 			return ret;
447 	}
448 	if (!args->default_acl && !args->acl)
449 		cache_no_acl(args->inode);
450 	return btrfs_xattr_security_init(trans, args->inode, args->dir,
451 					 &args->dentry->d_name);
452 }
453 
454 /*
455  * this does all the hard work for inserting an inline extent into
456  * the btree.  The caller should have done a btrfs_drop_extents so that
457  * no overlapping inline items exist in the btree
458  */
insert_inline_extent(struct btrfs_trans_handle * trans,struct btrfs_path * path,struct btrfs_inode * inode,bool extent_inserted,size_t size,size_t compressed_size,int compress_type,struct folio * compressed_folio,bool update_i_size)459 static int insert_inline_extent(struct btrfs_trans_handle *trans,
460 				struct btrfs_path *path,
461 				struct btrfs_inode *inode, bool extent_inserted,
462 				size_t size, size_t compressed_size,
463 				int compress_type,
464 				struct folio *compressed_folio,
465 				bool update_i_size)
466 {
467 	struct btrfs_root *root = inode->root;
468 	struct extent_buffer *leaf;
469 	const u32 sectorsize = trans->fs_info->sectorsize;
470 	char *kaddr;
471 	unsigned long ptr;
472 	struct btrfs_file_extent_item *ei;
473 	int ret;
474 	size_t cur_size = size;
475 	u64 i_size;
476 
477 	/*
478 	 * The decompressed size must still be no larger than a sector.  Under
479 	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
480 	 * big deal and we can continue the insertion.
481 	 */
482 	ASSERT(size <= sectorsize);
483 
484 	/*
485 	 * The compressed size also needs to be no larger than a page.
486 	 * That's also why we only need one folio as the parameter.
487 	 */
488 	if (compressed_folio) {
489 		ASSERT(compressed_size <= sectorsize);
490 		ASSERT(compressed_size <= PAGE_SIZE);
491 	} else {
492 		ASSERT(compressed_size == 0);
493 	}
494 
495 	if (compressed_size && compressed_folio)
496 		cur_size = compressed_size;
497 
498 	if (!extent_inserted) {
499 		struct btrfs_key key;
500 		size_t datasize;
501 
502 		key.objectid = btrfs_ino(inode);
503 		key.type = BTRFS_EXTENT_DATA_KEY;
504 		key.offset = 0;
505 
506 		datasize = btrfs_file_extent_calc_inline_size(cur_size);
507 		ret = btrfs_insert_empty_item(trans, root, path, &key,
508 					      datasize);
509 		if (ret)
510 			return ret;
511 	}
512 	leaf = path->nodes[0];
513 	ei = btrfs_item_ptr(leaf, path->slots[0],
514 			    struct btrfs_file_extent_item);
515 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
516 	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
517 	btrfs_set_file_extent_encryption(leaf, ei, 0);
518 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
519 	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
520 	ptr = btrfs_file_extent_inline_start(ei);
521 
522 	if (compress_type != BTRFS_COMPRESS_NONE) {
523 		kaddr = kmap_local_folio(compressed_folio, 0);
524 		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
525 		kunmap_local(kaddr);
526 
527 		btrfs_set_file_extent_compression(leaf, ei,
528 						  compress_type);
529 	} else {
530 		struct folio *folio;
531 
532 		folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
533 		ASSERT(!IS_ERR(folio));
534 		btrfs_set_file_extent_compression(leaf, ei, 0);
535 		kaddr = kmap_local_folio(folio, 0);
536 		write_extent_buffer(leaf, kaddr, ptr, size);
537 		kunmap_local(kaddr);
538 		folio_put(folio);
539 	}
540 	btrfs_release_path(path);
541 
542 	/*
543 	 * We align size to sectorsize for inline extents just for simplicity
544 	 * sake.
545 	 */
546 	ret = btrfs_inode_set_file_extent_range(inode, 0,
547 					ALIGN(size, root->fs_info->sectorsize));
548 	if (ret)
549 		return ret;
550 
551 	/*
552 	 * We're an inline extent, so nobody can extend the file past i_size
553 	 * without locking a page we already have locked.
554 	 *
555 	 * We must do any i_size and inode updates before we unlock the pages.
556 	 * Otherwise we could end up racing with unlink.
557 	 */
558 	i_size = i_size_read(&inode->vfs_inode);
559 	if (update_i_size && size > i_size) {
560 		i_size_write(&inode->vfs_inode, size);
561 		i_size = size;
562 	}
563 	inode->disk_i_size = i_size;
564 
565 	return 0;
566 }
567 
can_cow_file_range_inline(struct btrfs_inode * inode,u64 offset,u64 size,size_t compressed_size)568 static bool can_cow_file_range_inline(struct btrfs_inode *inode,
569 				      u64 offset, u64 size,
570 				      size_t compressed_size)
571 {
572 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
573 	u64 data_len = (compressed_size ?: size);
574 
575 	/* Inline extents must start at offset 0. */
576 	if (offset != 0)
577 		return false;
578 
579 	/*
580 	 * Even for bs > ps cases, cow_file_range_inline() can only accept a
581 	 * single folio.
582 	 *
583 	 * This can be problematic and cause access beyond page boundary if a
584 	 * page sized folio is passed into that function.
585 	 * And encoded write is doing exactly that.
586 	 * So here limits the inlined extent size to PAGE_SIZE.
587 	 */
588 	if (size > PAGE_SIZE || compressed_size > PAGE_SIZE)
589 		return false;
590 
591 	/* Inline extents are limited to sectorsize. */
592 	if (size > fs_info->sectorsize)
593 		return false;
594 
595 	/* We do not allow a non-compressed extent to be as large as block size. */
596 	if (data_len >= fs_info->sectorsize)
597 		return false;
598 
599 	/* We cannot exceed the maximum inline data size. */
600 	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
601 		return false;
602 
603 	/* We cannot exceed the user specified max_inline size. */
604 	if (data_len > fs_info->max_inline)
605 		return false;
606 
607 	/* Inline extents must be the entirety of the file. */
608 	if (size < i_size_read(&inode->vfs_inode))
609 		return false;
610 
611 	/* Encrypted file cannot be inlined. */
612 	if (IS_ENCRYPTED(&inode->vfs_inode))
613 		return false;
614 
615 	return true;
616 }
617 
618 /*
619  * conditionally insert an inline extent into the file.  This
620  * does the checks required to make sure the data is small enough
621  * to fit as an inline extent.
622  *
623  * If being used directly, you must have already checked we're allowed to cow
624  * the range by getting true from can_cow_file_range_inline().
625  */
__cow_file_range_inline(struct btrfs_inode * inode,u64 size,size_t compressed_size,int compress_type,struct folio * compressed_folio,bool update_i_size)626 static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
627 					    u64 size, size_t compressed_size,
628 					    int compress_type,
629 					    struct folio *compressed_folio,
630 					    bool update_i_size)
631 {
632 	struct btrfs_drop_extents_args drop_args = { 0 };
633 	struct btrfs_root *root = inode->root;
634 	struct btrfs_fs_info *fs_info = root->fs_info;
635 	struct btrfs_trans_handle *trans = NULL;
636 	u64 data_len = (compressed_size ?: size);
637 	int ret;
638 	struct btrfs_path *path;
639 
640 	path = btrfs_alloc_path();
641 	if (!path) {
642 		ret = -ENOMEM;
643 		goto out;
644 	}
645 
646 	trans = btrfs_join_transaction(root);
647 	if (IS_ERR(trans)) {
648 		ret = PTR_ERR(trans);
649 		trans = NULL;
650 		goto out;
651 	}
652 	trans->block_rsv = &inode->block_rsv;
653 
654 	drop_args.path = path;
655 	drop_args.start = 0;
656 	drop_args.end = fs_info->sectorsize;
657 	drop_args.drop_cache = true;
658 	drop_args.replace_extent = true;
659 	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
660 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
661 	if (unlikely(ret)) {
662 		btrfs_abort_transaction(trans, ret);
663 		goto out;
664 	}
665 
666 	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
667 				   size, compressed_size, compress_type,
668 				   compressed_folio, update_i_size);
669 	if (unlikely(ret && ret != -ENOSPC)) {
670 		btrfs_abort_transaction(trans, ret);
671 		goto out;
672 	} else if (ret == -ENOSPC) {
673 		ret = 1;
674 		goto out;
675 	}
676 
677 	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
678 	ret = btrfs_update_inode(trans, inode);
679 	if (unlikely(ret && ret != -ENOSPC)) {
680 		btrfs_abort_transaction(trans, ret);
681 		goto out;
682 	} else if (ret == -ENOSPC) {
683 		ret = 1;
684 		goto out;
685 	}
686 
687 	btrfs_set_inode_full_sync(inode);
688 out:
689 	/*
690 	 * Don't forget to free the reserved space, as for inlined extent
691 	 * it won't count as data extent, free them directly here.
692 	 * And at reserve time, it's always aligned to sector size, so
693 	 * just free one sector here.
694 	 *
695 	 * If we fallback to non-inline (ret == 1) due to -ENOSPC, then we need
696 	 * to keep the data reservation.
697 	 */
698 	if (ret <= 0)
699 		btrfs_qgroup_free_data(inode, NULL, 0, fs_info->sectorsize, NULL);
700 	btrfs_free_path(path);
701 	if (trans)
702 		btrfs_end_transaction(trans);
703 	return ret;
704 }
705 
cow_file_range_inline(struct btrfs_inode * inode,struct folio * locked_folio,u64 offset,u64 end,size_t compressed_size,int compress_type,struct folio * compressed_folio,bool update_i_size)706 static noinline int cow_file_range_inline(struct btrfs_inode *inode,
707 					  struct folio *locked_folio,
708 					  u64 offset, u64 end,
709 					  size_t compressed_size,
710 					  int compress_type,
711 					  struct folio *compressed_folio,
712 					  bool update_i_size)
713 {
714 	struct extent_state *cached = NULL;
715 	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
716 		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
717 	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
718 	int ret;
719 
720 	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
721 		return 1;
722 
723 	btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
724 	ret = __cow_file_range_inline(inode, size, compressed_size,
725 				      compress_type, compressed_folio,
726 				      update_i_size);
727 	if (ret > 0) {
728 		btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
729 		return ret;
730 	}
731 
732 	/*
733 	 * In the successful case (ret == 0 here), cow_file_range will return 1.
734 	 *
735 	 * Quite a bit further up the callstack in extent_writepage(), ret == 1
736 	 * is treated as a short circuited success and does not unlock the folio,
737 	 * so we must do it here.
738 	 *
739 	 * In the failure case, the locked_folio does get unlocked by
740 	 * btrfs_folio_end_all_writers, which asserts that it is still locked
741 	 * at that point, so we must *not* unlock it here.
742 	 *
743 	 * The other two callsites in compress_file_range do not have a
744 	 * locked_folio, so they are not relevant to this logic.
745 	 */
746 	if (ret == 0)
747 		locked_folio = NULL;
748 
749 	extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
750 				     clear_flags, PAGE_UNLOCK |
751 				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
752 	return ret;
753 }
754 
755 struct async_extent {
756 	u64 start;
757 	u64 ram_size;
758 	struct compressed_bio *cb;
759 	struct list_head list;
760 };
761 
762 struct async_chunk {
763 	struct btrfs_inode *inode;
764 	struct folio *locked_folio;
765 	u64 start;
766 	u64 end;
767 	blk_opf_t write_flags;
768 	struct list_head extents;
769 	struct cgroup_subsys_state *blkcg_css;
770 	struct btrfs_work work;
771 	struct async_cow *async_cow;
772 };
773 
774 struct async_cow {
775 	atomic_t num_chunks;
776 	struct async_chunk chunks[];
777 };
778 
add_async_extent(struct async_chunk * cow,u64 start,u64 ram_size,struct compressed_bio * cb)779 static int add_async_extent(struct async_chunk *cow, u64 start, u64 ram_size,
780 			    struct compressed_bio *cb)
781 {
782 	struct async_extent *async_extent;
783 
784 	async_extent = kmalloc_obj(*async_extent, GFP_NOFS);
785 	if (!async_extent)
786 		return -ENOMEM;
787 	ASSERT(ram_size < U32_MAX);
788 	async_extent->start = start;
789 	async_extent->ram_size = ram_size;
790 	async_extent->cb = cb;
791 	list_add_tail(&async_extent->list, &cow->extents);
792 	return 0;
793 }
794 
795 /*
796  * Check if the inode needs to be submitted to compression, based on mount
797  * options, defragmentation, properties or heuristics.
798  */
inode_need_compress(struct btrfs_inode * inode,u64 start,u64 end)799 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
800 				      u64 end)
801 {
802 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
803 
804 	if (!btrfs_inode_can_compress(inode)) {
805 		DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode));
806 		return 0;
807 	}
808 
809 	/*
810 	 * If the delalloc range is only one fs block and can not be inlined,
811 	 * do not even bother try compression, as there will be no space saving
812 	 * and will always fallback to regular write later.
813 	 */
814 	if (start != 0 && end + 1 - start <= fs_info->sectorsize)
815 		return 0;
816 	/* Defrag ioctl takes precedence over mount options and properties. */
817 	if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
818 		return 0;
819 	if (BTRFS_COMPRESS_NONE < inode->defrag_compress &&
820 	    inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES)
821 		return 1;
822 	/* force compress */
823 	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
824 		return 1;
825 	/* bad compression ratios */
826 	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
827 		return 0;
828 	if (btrfs_test_opt(fs_info, COMPRESS) ||
829 	    inode->flags & BTRFS_INODE_COMPRESS ||
830 	    inode->prop_compress)
831 		return btrfs_compress_heuristic(inode, start, end);
832 	return 0;
833 }
834 
inode_should_defrag(struct btrfs_inode * inode,u64 start,u64 end,u64 num_bytes,u32 small_write)835 static inline void inode_should_defrag(struct btrfs_inode *inode,
836 		u64 start, u64 end, u64 num_bytes, u32 small_write)
837 {
838 	/* If this is a small write inside eof, kick off a defrag */
839 	if (num_bytes < small_write &&
840 	    (start > 0 || end + 1 < inode->disk_i_size))
841 		btrfs_add_inode_defrag(inode, small_write);
842 }
843 
extent_range_clear_dirty_for_io(struct btrfs_inode * inode,u64 start,u64 end)844 static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end)
845 {
846 	const pgoff_t end_index = end >> PAGE_SHIFT;
847 	struct folio *folio;
848 	int ret = 0;
849 
850 	for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) {
851 		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
852 		if (IS_ERR(folio)) {
853 			if (!ret)
854 				ret = PTR_ERR(folio);
855 			continue;
856 		}
857 		btrfs_folio_clamp_clear_dirty(inode->root->fs_info, folio, start,
858 					      end + 1 - start);
859 		folio_put(folio);
860 	}
861 	return ret;
862 }
863 
compressed_bio_last_folio(struct compressed_bio * cb)864 static struct folio *compressed_bio_last_folio(struct compressed_bio *cb)
865 {
866 	struct bio *bio = &cb->bbio.bio;
867 	struct bio_vec *bvec;
868 	phys_addr_t paddr;
869 
870 	/*
871 	 * Make sure all folios have the same min_folio_size.
872 	 *
873 	 * Otherwise we cannot simply use offset_in_offset(folio, bi_size) to
874 	 * calculate the end of the last folio.
875 	 */
876 	if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
877 		struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
878 		const u32 min_folio_size = btrfs_min_folio_size(fs_info);
879 		struct folio_iter fi;
880 
881 		bio_for_each_folio_all(fi, bio)
882 			ASSERT(folio_size(fi.folio) == min_folio_size);
883 	}
884 
885 	/* The bio must not be empty. */
886 	ASSERT(bio->bi_vcnt);
887 
888 	bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
889 	paddr = page_to_phys(bvec->bv_page) + bvec->bv_offset + bvec->bv_len - 1;
890 	return page_folio(phys_to_page(paddr));
891 }
892 
zero_last_folio(struct compressed_bio * cb)893 static void zero_last_folio(struct compressed_bio *cb)
894 {
895 	struct bio *bio = &cb->bbio.bio;
896 	struct folio *last_folio = compressed_bio_last_folio(cb);
897 	const u32 bio_size = bio->bi_iter.bi_size;
898 	const u32 foffset = offset_in_folio(last_folio, bio_size);
899 
900 	folio_zero_range(last_folio, foffset, folio_size(last_folio) - foffset);
901 }
902 
round_up_last_block(struct compressed_bio * cb,u32 blocksize)903 static void round_up_last_block(struct compressed_bio *cb, u32 blocksize)
904 {
905 	struct bio *bio = &cb->bbio.bio;
906 	struct folio *last_folio = compressed_bio_last_folio(cb);
907 	const u32 bio_size = bio->bi_iter.bi_size;
908 	const u32 foffset = offset_in_folio(last_folio, bio_size);
909 	bool ret;
910 
911 	if (IS_ALIGNED(bio_size, blocksize))
912 		return;
913 
914 	ret = bio_add_folio(bio, last_folio, round_up(foffset, blocksize) - foffset, foffset);
915 	/* The remaining part should be merged thus never fail. */
916 	ASSERT(ret);
917 }
918 
919 /*
920  * Work queue call back to started compression on a file and pages.
921  *
922  * This is done inside an ordered work queue, and the compression is spread
923  * across many cpus.  The actual IO submission is step two, and the ordered work
924  * queue takes care of making sure that happens in the same order things were
925  * put onto the queue by writepages and friends.
926  *
927  * If this code finds it can't get good compression, it puts an entry onto the
928  * work queue to write the uncompressed bytes.  This makes sure that both
929  * compressed inodes and uncompressed inodes are written in the same order that
930  * the flusher thread sent them down.
931  */
compress_file_range(struct btrfs_work * work)932 static void compress_file_range(struct btrfs_work *work)
933 {
934 	struct async_chunk *async_chunk =
935 		container_of(work, struct async_chunk, work);
936 	struct btrfs_inode *inode = async_chunk->inode;
937 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
938 	struct address_space *mapping = inode->vfs_inode.i_mapping;
939 	struct compressed_bio *cb = NULL;
940 	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
941 	u64 blocksize = fs_info->sectorsize;
942 	u64 start = async_chunk->start;
943 	u64 end = async_chunk->end;
944 	u64 actual_end;
945 	u64 i_size;
946 	u32 cur_len;
947 	int ret = 0;
948 	unsigned long total_compressed = 0;
949 	unsigned long total_in = 0;
950 	unsigned int loff;
951 	int compress_type = fs_info->compress_type;
952 	int compress_level = fs_info->compress_level;
953 
954 	if (btrfs_is_shutdown(fs_info))
955 		goto cleanup_and_bail_uncompressed;
956 
957 	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
958 
959 	/*
960 	 * We need to call clear_page_dirty_for_io on each page in the range.
961 	 * Otherwise applications with the file mmap'd can wander in and change
962 	 * the page contents while we are compressing them.
963 	 */
964 	ret = extent_range_clear_dirty_for_io(inode, start, end);
965 
966 	/*
967 	 * All the folios should have been locked thus no failure.
968 	 *
969 	 * And even if some folios are missing, btrfs_compress_bio()
970 	 * would handle them correctly, so here just do an ASSERT() check for
971 	 * early logic errors.
972 	 */
973 	ASSERT(ret == 0);
974 
975 	/*
976 	 * We need to save i_size before now because it could change in between
977 	 * us evaluating the size and assigning it.  This is because we lock and
978 	 * unlock the page in truncate and fallocate, and then modify the i_size
979 	 * later on.
980 	 *
981 	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
982 	 * does that for us.
983 	 */
984 	barrier();
985 	i_size = i_size_read(&inode->vfs_inode);
986 	barrier();
987 	actual_end = min_t(u64, i_size, end + 1);
988 again:
989 	total_in = 0;
990 	cur_len = min(end + 1 - start, BTRFS_MAX_UNCOMPRESSED);
991 	ret = 0;
992 	cb = NULL;
993 
994 	/*
995 	 * we don't want to send crud past the end of i_size through
996 	 * compression, that's just a waste of CPU time.  So, if the
997 	 * end of the file is before the start of our current
998 	 * requested range of bytes, we bail out to the uncompressed
999 	 * cleanup code that can deal with all of this.
1000 	 *
1001 	 * It isn't really the fastest way to fix things, but this is a
1002 	 * very uncommon corner.
1003 	 */
1004 	if (actual_end <= start)
1005 		goto cleanup_and_bail_uncompressed;
1006 
1007 	/*
1008 	 * We do compression for mount -o compress and when the inode has not
1009 	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
1010 	 * discover bad compression ratios.
1011 	 */
1012 	if (!inode_need_compress(inode, start, end))
1013 		goto cleanup_and_bail_uncompressed;
1014 
1015 	if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
1016 		compress_type = inode->defrag_compress;
1017 		compress_level = inode->defrag_compress_level;
1018 	} else if (inode->prop_compress) {
1019 		compress_type = inode->prop_compress;
1020 	}
1021 
1022 	/* Compression level is applied here. */
1023 	cb = btrfs_compress_bio(inode, start, cur_len, compress_type,
1024 				 compress_level, async_chunk->write_flags);
1025 	if (IS_ERR(cb)) {
1026 		cb = NULL;
1027 		goto mark_incompressible;
1028 	}
1029 
1030 	total_compressed = cb->bbio.bio.bi_iter.bi_size;
1031 	total_in = cur_len;
1032 
1033 	/*
1034 	 * Zero the tail end of the last folio, as we might be sending it down
1035 	 * to disk.
1036 	 */
1037 	loff = (total_compressed & (min_folio_size - 1));
1038 	if (loff)
1039 		zero_last_folio(cb);
1040 
1041 	/*
1042 	 * Try to create an inline extent.
1043 	 *
1044 	 * If we didn't compress the entire range, try to create an uncompressed
1045 	 * inline extent, else a compressed one.
1046 	 *
1047 	 * Check cow_file_range() for why we don't even try to create inline
1048 	 * extent for the subpage case.
1049 	 */
1050 	if (total_in < actual_end)
1051 		ret = cow_file_range_inline(inode, NULL, start, end, 0,
1052 					    BTRFS_COMPRESS_NONE, NULL, false);
1053 	else
1054 		ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
1055 					    compress_type,
1056 					    bio_first_folio_all(&cb->bbio.bio), false);
1057 	if (ret <= 0) {
1058 		cleanup_compressed_bio(cb);
1059 		if (ret < 0)
1060 			mapping_set_error(mapping, -EIO);
1061 		return;
1062 	}
1063 
1064 	/*
1065 	 * We aren't doing an inline extent. Round the compressed size up to a
1066 	 * block size boundary so the allocator does sane things.
1067 	 */
1068 	total_compressed = ALIGN(total_compressed, blocksize);
1069 	round_up_last_block(cb, blocksize);
1070 
1071 	/*
1072 	 * One last check to make sure the compression is really a win, compare
1073 	 * the page count read with the blocks on disk, compression must free at
1074 	 * least one sector.
1075 	 */
1076 	total_in = round_up(total_in, fs_info->sectorsize);
1077 	if (total_compressed + blocksize > total_in)
1078 		goto mark_incompressible;
1079 
1080 
1081 	/*
1082 	 * The async work queues will take care of doing actual allocation on
1083 	 * disk for these compressed pages, and will submit the bios.
1084 	 */
1085 	ret = add_async_extent(async_chunk, start, total_in, cb);
1086 	BUG_ON(ret);
1087 	if (start + total_in < end) {
1088 		start += total_in;
1089 		cond_resched();
1090 		goto again;
1091 	}
1092 	return;
1093 
1094 mark_incompressible:
1095 	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
1096 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
1097 cleanup_and_bail_uncompressed:
1098 	ret = add_async_extent(async_chunk, start, end - start + 1, NULL);
1099 	BUG_ON(ret);
1100 	if (cb)
1101 		cleanup_compressed_bio(cb);
1102 }
1103 
submit_uncompressed_range(struct btrfs_inode * inode,struct async_extent * async_extent,struct folio * locked_folio)1104 static void submit_uncompressed_range(struct btrfs_inode *inode,
1105 				      struct async_extent *async_extent,
1106 				      struct folio *locked_folio)
1107 {
1108 	u64 start = async_extent->start;
1109 	u64 end = async_extent->start + async_extent->ram_size - 1;
1110 	int ret;
1111 	struct writeback_control wbc = {
1112 		.sync_mode		= WB_SYNC_ALL,
1113 		.range_start		= start,
1114 		.range_end		= end,
1115 		.no_cgroup_owner	= 1,
1116 	};
1117 
1118 	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
1119 	ret = run_delalloc_cow(inode, locked_folio, start, end,
1120 			       &wbc, false);
1121 	wbc_detach_inode(&wbc);
1122 	if (ret < 0) {
1123 		if (locked_folio)
1124 			btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
1125 					     start, async_extent->ram_size);
1126 		btrfs_err_rl(inode->root->fs_info,
1127 			"%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
1128 			     __func__, btrfs_root_id(inode->root),
1129 			     btrfs_ino(inode), start, async_extent->ram_size, ret);
1130 	}
1131 }
1132 
submit_one_async_extent(struct async_chunk * async_chunk,struct async_extent * async_extent,u64 * alloc_hint)1133 static void submit_one_async_extent(struct async_chunk *async_chunk,
1134 				    struct async_extent *async_extent,
1135 				    u64 *alloc_hint)
1136 {
1137 	struct btrfs_inode *inode = async_chunk->inode;
1138 	struct extent_io_tree *io_tree = &inode->io_tree;
1139 	struct btrfs_root *root = inode->root;
1140 	struct btrfs_fs_info *fs_info = root->fs_info;
1141 	struct btrfs_ordered_extent *ordered;
1142 	struct btrfs_file_extent file_extent;
1143 	struct btrfs_key ins;
1144 	struct folio *locked_folio = NULL;
1145 	struct extent_state *cached = NULL;
1146 	struct extent_map *em;
1147 	int ret = 0;
1148 	u32 compressed_size;
1149 	u64 start = async_extent->start;
1150 	u64 end = async_extent->start + async_extent->ram_size - 1;
1151 
1152 	if (async_chunk->blkcg_css)
1153 		kthread_associate_blkcg(async_chunk->blkcg_css);
1154 
1155 	/*
1156 	 * If async_chunk->locked_folio is in the async_extent range, we need to
1157 	 * handle it.
1158 	 */
1159 	if (async_chunk->locked_folio) {
1160 		u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
1161 		u64 locked_folio_end = locked_folio_start +
1162 			folio_size(async_chunk->locked_folio) - 1;
1163 
1164 		if (!(start >= locked_folio_end || end <= locked_folio_start))
1165 			locked_folio = async_chunk->locked_folio;
1166 	}
1167 
1168 	if (!async_extent->cb) {
1169 		submit_uncompressed_range(inode, async_extent, locked_folio);
1170 		goto done;
1171 	}
1172 
1173 	compressed_size = async_extent->cb->bbio.bio.bi_iter.bi_size;
1174 	ret = btrfs_reserve_extent(root, async_extent->ram_size,
1175 				   compressed_size, compressed_size,
1176 				   0, *alloc_hint, &ins, true, true);
1177 	if (ret) {
1178 		/*
1179 		 * We can't reserve contiguous space for the compressed size.
1180 		 * Unlikely, but it's possible that we could have enough
1181 		 * non-contiguous space for the uncompressed size instead.  So
1182 		 * fall back to uncompressed.
1183 		 */
1184 		submit_uncompressed_range(inode, async_extent, locked_folio);
1185 		cleanup_compressed_bio(async_extent->cb);
1186 		async_extent->cb = NULL;
1187 		goto done;
1188 	}
1189 
1190 	btrfs_lock_extent(io_tree, start, end, &cached);
1191 
1192 	/* Here we're doing allocation and writeback of the compressed pages */
1193 	file_extent.disk_bytenr = ins.objectid;
1194 	file_extent.disk_num_bytes = ins.offset;
1195 	file_extent.ram_bytes = async_extent->ram_size;
1196 	file_extent.num_bytes = async_extent->ram_size;
1197 	file_extent.offset = 0;
1198 	file_extent.compression = async_extent->cb->compress_type;
1199 
1200 	async_extent->cb->bbio.bio.bi_iter.bi_sector = ins.objectid >> SECTOR_SHIFT;
1201 
1202 	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
1203 	if (IS_ERR(em)) {
1204 		ret = PTR_ERR(em);
1205 		goto out_free_reserve;
1206 	}
1207 	btrfs_free_extent_map(em);
1208 
1209 	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
1210 					     1U << BTRFS_ORDERED_COMPRESSED);
1211 	if (IS_ERR(ordered)) {
1212 		btrfs_drop_extent_map_range(inode, start, end, false);
1213 		ret = PTR_ERR(ordered);
1214 		goto out_free_reserve;
1215 	}
1216 	async_extent->cb->bbio.ordered = ordered;
1217 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1218 
1219 	/* Clear dirty, set writeback and unlock the pages. */
1220 	extent_clear_unlock_delalloc(inode, start, end,
1221 			NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
1222 			PAGE_UNLOCK | PAGE_START_WRITEBACK);
1223 	btrfs_submit_bbio(&async_extent->cb->bbio, 0);
1224 	async_extent->cb = NULL;
1225 
1226 	*alloc_hint = ins.objectid + ins.offset;
1227 done:
1228 	if (async_chunk->blkcg_css)
1229 		kthread_associate_blkcg(NULL);
1230 	kfree(async_extent);
1231 	return;
1232 
1233 out_free_reserve:
1234 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
1235 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
1236 	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1237 	extent_clear_unlock_delalloc(inode, start, end,
1238 				     NULL, &cached,
1239 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1240 				     EXTENT_DELALLOC_NEW |
1241 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
1242 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1243 				     PAGE_END_WRITEBACK);
1244 	if (async_extent->cb)
1245 		cleanup_compressed_bio(async_extent->cb);
1246 	if (async_chunk->blkcg_css)
1247 		kthread_associate_blkcg(NULL);
1248 	btrfs_debug(fs_info,
1249 "async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
1250 		    btrfs_root_id(root), btrfs_ino(inode), start,
1251 		    async_extent->ram_size, ret);
1252 	kfree(async_extent);
1253 }
1254 
btrfs_get_extent_allocation_hint(struct btrfs_inode * inode,u64 start,u64 num_bytes)1255 u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
1256 				     u64 num_bytes)
1257 {
1258 	struct extent_map_tree *em_tree = &inode->extent_tree;
1259 	struct extent_map *em;
1260 	u64 alloc_hint = 0;
1261 
1262 	read_lock(&em_tree->lock);
1263 	em = btrfs_search_extent_mapping(em_tree, start, num_bytes);
1264 	if (em) {
1265 		/*
1266 		 * if block start isn't an actual block number then find the
1267 		 * first block in this inode and use that as a hint.  If that
1268 		 * block is also bogus then just don't worry about it.
1269 		 */
1270 		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
1271 			btrfs_free_extent_map(em);
1272 			em = btrfs_search_extent_mapping(em_tree, 0, 0);
1273 			if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
1274 				alloc_hint = btrfs_extent_map_block_start(em);
1275 			if (em)
1276 				btrfs_free_extent_map(em);
1277 		} else {
1278 			alloc_hint = btrfs_extent_map_block_start(em);
1279 			btrfs_free_extent_map(em);
1280 		}
1281 	}
1282 	read_unlock(&em_tree->lock);
1283 
1284 	return alloc_hint;
1285 }
1286 
1287 /*
1288  * Handle COW for one range.
1289  *
1290  * @ins:		The key representing the allocated range.
1291  * @file_offset:	The file offset of the COW range
1292  * @num_bytes:		The expected length of the COW range
1293  *			The actually allocated length can be smaller than it.
1294  * @min_alloc_size:	The minimal extent size.
1295  * @alloc_hint:		The hint for the extent allocator.
1296  * @ret_alloc_size:	The COW range handles by this function.
1297  *
1298  * Return 0 if everything is fine and update @ret_alloc_size updated.  The
1299  * range is still locked, and caller should unlock the range after everything
1300  * is done or for error handling.
1301  *
1302  * Return <0 for error and @is updated for where the extra cleanup should
1303  * happen. The range [file_offset, file_offset + ret_alloc_size) will be
1304  * cleaned up by this function.
1305  */
cow_one_range(struct btrfs_inode * inode,struct folio * locked_folio,struct btrfs_key * ins,struct extent_state ** cached,u64 file_offset,u32 num_bytes,u32 min_alloc_size,u64 alloc_hint,u32 * ret_alloc_size)1306 static int cow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
1307 			 struct btrfs_key *ins, struct extent_state **cached,
1308 			 u64 file_offset, u32 num_bytes, u32 min_alloc_size,
1309 			 u64 alloc_hint, u32 *ret_alloc_size)
1310 {
1311 	struct btrfs_root *root = inode->root;
1312 	struct btrfs_fs_info *fs_info = root->fs_info;
1313 	struct btrfs_ordered_extent *ordered;
1314 	struct btrfs_file_extent file_extent;
1315 	struct extent_map *em;
1316 	u32 cur_len = 0;
1317 	u64 cur_end;
1318 	int ret;
1319 
1320 	ret = btrfs_reserve_extent(root, num_bytes, num_bytes, min_alloc_size,
1321 				   0, alloc_hint, ins, true, true);
1322 	if (ret < 0) {
1323 		*ret_alloc_size = cur_len;
1324 		return ret;
1325 	}
1326 
1327 	cur_len = ins->offset;
1328 	cur_end = file_offset + cur_len - 1;
1329 
1330 	file_extent.disk_bytenr = ins->objectid;
1331 	file_extent.disk_num_bytes = ins->offset;
1332 	file_extent.num_bytes = ins->offset;
1333 	file_extent.ram_bytes = ins->offset;
1334 	file_extent.offset = 0;
1335 	file_extent.compression = BTRFS_COMPRESS_NONE;
1336 
1337 	/*
1338 	 * Locked range will be released either during error clean up (inside
1339 	 * this function or by the caller for previously successful ranges) or
1340 	 * after the whole range is finished.
1341 	 */
1342 	btrfs_lock_extent(&inode->io_tree, file_offset, cur_end, cached);
1343 	em = btrfs_create_io_em(inode, file_offset, &file_extent, BTRFS_ORDERED_REGULAR);
1344 	if (IS_ERR(em)) {
1345 		ret = PTR_ERR(em);
1346 		goto free_reserved;
1347 	}
1348 	btrfs_free_extent_map(em);
1349 
1350 	ordered = btrfs_alloc_ordered_extent(inode, file_offset, &file_extent,
1351 					     1U << BTRFS_ORDERED_REGULAR);
1352 	if (IS_ERR(ordered)) {
1353 		btrfs_drop_extent_map_range(inode, file_offset, cur_end, false);
1354 		ret = PTR_ERR(ordered);
1355 		goto free_reserved;
1356 	}
1357 
1358 	if (btrfs_is_data_reloc_root(root)) {
1359 		ret = btrfs_reloc_clone_csums(ordered);
1360 
1361 		/*
1362 		 * Only drop cache here, and process as normal.
1363 		 *
1364 		 * We must not allow extent_clear_unlock_delalloc() at
1365 		 * free_reserved label to free meta of this ordered extent, as
1366 		 * its meta should be freed by btrfs_finish_ordered_io().
1367 		 *
1368 		 * So we must continue until @start is increased to
1369 		 * skip current ordered extent.
1370 		 */
1371 		if (ret)
1372 			btrfs_drop_extent_map_range(inode, file_offset,
1373 						    cur_end, false);
1374 	}
1375 	btrfs_put_ordered_extent(ordered);
1376 	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
1377 	/*
1378 	 * Error handling for btrfs_reloc_clone_csums().
1379 	 *
1380 	 * Treat the range as finished, thus only clear EXTENT_LOCKED | EXTENT_DELALLOC.
1381 	 * The accounting will be done by ordered extents.
1382 	 */
1383 	if (unlikely(ret < 0)) {
1384 		btrfs_cleanup_ordered_extents(inode, file_offset, cur_len);
1385 		extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
1386 					     EXTENT_LOCKED | EXTENT_DELALLOC,
1387 					     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1388 					     PAGE_END_WRITEBACK);
1389 		mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1390 	}
1391 	*ret_alloc_size = cur_len;
1392 	return ret;
1393 
1394 free_reserved:
1395 	/*
1396 	 * If we have reserved an extent for the current range and failed to
1397 	 * create the respective extent map or ordered extent, it means that
1398 	 * when we reserved the extent we decremented the extent's size from
1399 	 * the data space_info's bytes_may_use counter and
1400 	 * incremented the space_info's bytes_reserved counter by the same
1401 	 * amount.
1402 	 *
1403 	 * We must make sure extent_clear_unlock_delalloc() does not try
1404 	 * to decrement again the data space_info's bytes_may_use counter, which
1405 	 * will be handled by btrfs_free_reserved_extent().
1406 	 *
1407 	 * Therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV, but only
1408 	 * EXTENT_CLEAR_META_RESV.
1409 	 */
1410 	extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
1411 				     EXTENT_LOCKED | EXTENT_DELALLOC |
1412 				     EXTENT_DELALLOC_NEW |
1413 				     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV,
1414 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
1415 				     PAGE_END_WRITEBACK);
1416 	btrfs_qgroup_free_data(inode, NULL, file_offset, cur_len, NULL);
1417 	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
1418 	btrfs_free_reserved_extent(fs_info, ins->objectid, ins->offset, true);
1419 	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
1420 	*ret_alloc_size = cur_len;
1421 	/*
1422 	 * We should not return -EAGAIN where it's a special return code for
1423 	 * zoned to catch btrfs_reserved_extent().
1424 	 */
1425 	ASSERT(ret != -EAGAIN);
1426 	return ret;
1427 }
1428 
1429 /*
1430  * when extent_io.c finds a delayed allocation range in the file,
1431  * the call backs end up in this code.  The basic idea is to
1432  * allocate extents on disk for the range, and create ordered data structs
1433  * in ram to track those extents.
1434  *
1435  * locked_folio is the folio that writepage had locked already.  We use
1436  * it to make sure we don't do extra locks or unlocks.
1437  *
1438  * When this function fails, it unlocks all folios except @locked_folio.
1439  *
1440  * When this function successfully creates an inline extent, it returns 1 and
1441  * unlocks all folios including locked_folio and starts I/O on them.
1442  * (In reality inline extents are limited to a single block, so locked_folio is
1443  * the only folio handled anyway).
1444  *
1445  * When this function succeed and creates a normal extent, the folio locking
1446  * status depends on the passed in flags:
1447  *
1448  * - If COW_FILE_RANGE_KEEP_LOCKED flag is set, all folios are kept locked.
1449  * - Else all folios except for @locked_folio are unlocked.
1450  *
1451  * When a failure happens in the second or later iteration of the
1452  * while-loop, the ordered extents created in previous iterations are cleaned up.
1453  */
cow_file_range(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,u64 * done_offset,unsigned long flags)1454 static noinline int cow_file_range(struct btrfs_inode *inode,
1455 				   struct folio *locked_folio, u64 start,
1456 				   u64 end, u64 *done_offset,
1457 				   unsigned long flags)
1458 {
1459 	struct btrfs_root *root = inode->root;
1460 	struct btrfs_fs_info *fs_info = root->fs_info;
1461 	struct extent_state *cached = NULL;
1462 	u64 alloc_hint = 0;
1463 	u64 orig_start = start;
1464 	u64 num_bytes;
1465 	u32 min_alloc_size;
1466 	u32 blocksize = fs_info->sectorsize;
1467 	u32 cur_alloc_size = 0;
1468 	struct btrfs_key ins;
1469 	unsigned clear_bits;
1470 	unsigned long page_ops;
1471 	int ret = 0;
1472 
1473 	if (btrfs_is_shutdown(fs_info)) {
1474 		ret = -EIO;
1475 		goto out_unlock;
1476 	}
1477 
1478 	if (btrfs_is_free_space_inode(inode)) {
1479 		ret = -EINVAL;
1480 		goto out_unlock;
1481 	}
1482 
1483 	num_bytes = ALIGN(end - start + 1, blocksize);
1484 	num_bytes = max(blocksize,  num_bytes);
1485 	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1486 
1487 	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1488 
1489 	if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
1490 		/* lets try to make an inline extent */
1491 		ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
1492 					    BTRFS_COMPRESS_NONE, NULL, false);
1493 		if (ret <= 0) {
1494 			/*
1495 			 * We succeeded, return 1 so the caller knows we're done
1496 			 * with this page and already handled the IO.
1497 			 *
1498 			 * If there was an error then cow_file_range_inline() has
1499 			 * already done the cleanup.
1500 			 */
1501 			if (ret == 0)
1502 				ret = 1;
1503 			goto done;
1504 		}
1505 	}
1506 
1507 	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
1508 
1509 	/*
1510 	 * We're not doing compressed IO, don't unlock the first page (which
1511 	 * the caller expects to stay locked), don't clear any dirty bits and
1512 	 * don't set any writeback bits.
1513 	 *
1514 	 * Do set the Ordered (Private2) bit so we know this page was properly
1515 	 * setup for writepage.
1516 	 */
1517 	page_ops = ((flags & COW_FILE_RANGE_KEEP_LOCKED) ? 0 : PAGE_UNLOCK);
1518 	page_ops |= PAGE_SET_ORDERED;
1519 
1520 	/*
1521 	 * Relocation relies on the relocated extents to have exactly the same
1522 	 * size as the original extents. Normally writeback for relocation data
1523 	 * extents follows a NOCOW path because relocation preallocates the
1524 	 * extents. However, due to an operation such as scrub turning a block
1525 	 * group to RO mode, it may fallback to COW mode, so we must make sure
1526 	 * an extent allocated during COW has exactly the requested size and can
1527 	 * not be split into smaller extents, otherwise relocation breaks and
1528 	 * fails during the stage where it updates the bytenr of file extent
1529 	 * items.
1530 	 */
1531 	if (btrfs_is_data_reloc_root(root))
1532 		min_alloc_size = num_bytes;
1533 	else
1534 		min_alloc_size = fs_info->sectorsize;
1535 
1536 	while (num_bytes > 0) {
1537 		ret = cow_one_range(inode, locked_folio, &ins, &cached, start,
1538 				    num_bytes, min_alloc_size, alloc_hint, &cur_alloc_size);
1539 
1540 		if (ret == -EAGAIN) {
1541 			/*
1542 			 * cow_one_range() only returns -EAGAIN for zoned
1543 			 * file systems (from btrfs_reserve_extent()), which
1544 			 * is an indication that there are
1545 			 * no active zones to allocate from at the moment.
1546 			 *
1547 			 * If this is the first loop iteration, wait for at
1548 			 * least one zone to finish before retrying the
1549 			 * allocation.  Otherwise ask the caller to write out
1550 			 * the already allocated blocks before coming back to
1551 			 * us, or return -ENOSPC if it can't handle retries.
1552 			 */
1553 			ASSERT(btrfs_is_zoned(fs_info));
1554 			if (start == orig_start) {
1555 				wait_on_bit_io(&inode->root->fs_info->flags,
1556 					       BTRFS_FS_NEED_ZONE_FINISH,
1557 					       TASK_UNINTERRUPTIBLE);
1558 				continue;
1559 			}
1560 			if (done_offset) {
1561 				/*
1562 				 * Move @end to the end of the processed range,
1563 				 * and exit the loop to unlock the processed extents.
1564 				 */
1565 				end = start - 1;
1566 				ret = 0;
1567 				break;
1568 			}
1569 			ret = -ENOSPC;
1570 		}
1571 		if (ret < 0)
1572 			goto out_unlock;
1573 
1574 		/* We should not allocate an extent larger than requested.*/
1575 		ASSERT(cur_alloc_size <= num_bytes);
1576 
1577 		num_bytes -= cur_alloc_size;
1578 		alloc_hint = ins.objectid + ins.offset;
1579 		start += cur_alloc_size;
1580 		cur_alloc_size = 0;
1581 	}
1582 	extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
1583 				     EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
1584 done:
1585 	if (done_offset)
1586 		*done_offset = end;
1587 	return ret;
1588 
1589 out_unlock:
1590 	/*
1591 	 * Now, we have three regions to clean up:
1592 	 *
1593 	 * |-------(1)----|---(2)---|-------------(3)----------|
1594 	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
1595 	 *
1596 	 * We process each region below.
1597 	 */
1598 
1599 	/*
1600 	 * For the range (1). We have already instantiated the ordered extents
1601 	 * for this region, thus we need to cleanup those ordered extents.
1602 	 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
1603 	 * are also handled by the ordered extents cleanup.
1604 	 *
1605 	 * So here we only clear EXTENT_LOCKED and EXTENT_DELALLOC flag, and
1606 	 * finish the writeback of the involved folios, which will be never submitted.
1607 	 */
1608 	if (orig_start < start) {
1609 		clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
1610 		page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1611 
1612 		if (!locked_folio)
1613 			mapping_set_error(inode->vfs_inode.i_mapping, ret);
1614 
1615 		btrfs_cleanup_ordered_extents(inode, orig_start, start - orig_start);
1616 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
1617 					     locked_folio, NULL, clear_bits, page_ops);
1618 	}
1619 
1620 	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1621 		     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1622 	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1623 
1624 	/*
1625 	 * For the range (2) the error handling is done by cow_one_range() itself.
1626 	 * Nothing needs to be done.
1627 	 *
1628 	 * For the range (3). We never touched the region. In addition to the
1629 	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
1630 	 * space_info's bytes_may_use counter, reserved in
1631 	 * btrfs_check_data_free_space().
1632 	 */
1633 	if (start + cur_alloc_size < end) {
1634 		clear_bits |= EXTENT_CLEAR_DATA_RESV;
1635 		extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
1636 					     end, locked_folio,
1637 					     &cached, clear_bits, page_ops);
1638 		btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
1639 				       end - start - cur_alloc_size + 1, NULL);
1640 	}
1641 	btrfs_err(fs_info,
1642 "%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu cur_alloc_size=%u: %d",
1643 		  __func__, btrfs_root_id(inode->root),
1644 		  btrfs_ino(inode), orig_start, end + 1 - orig_start,
1645 		  start, cur_alloc_size, ret);
1646 	return ret;
1647 }
1648 
1649 /*
1650  * Phase two of compressed writeback.  This is the ordered portion of the code,
1651  * which only gets called in the order the work was queued.  We walk all the
1652  * async extents created by compress_file_range and send them down to the disk.
1653  *
1654  * If called with @do_free == true then it'll try to finish the work and free
1655  * the work struct eventually.
1656  */
submit_compressed_extents(struct btrfs_work * work,bool do_free)1657 static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
1658 {
1659 	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1660 						     work);
1661 	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1662 	struct async_extent *async_extent;
1663 	unsigned long nr_pages;
1664 	u64 alloc_hint = 0;
1665 
1666 	if (do_free) {
1667 		struct async_cow *async_cow;
1668 
1669 		btrfs_add_delayed_iput(async_chunk->inode);
1670 		if (async_chunk->blkcg_css)
1671 			css_put(async_chunk->blkcg_css);
1672 
1673 		async_cow = async_chunk->async_cow;
1674 		if (atomic_dec_and_test(&async_cow->num_chunks))
1675 			kvfree(async_cow);
1676 		return;
1677 	}
1678 
1679 	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1680 		PAGE_SHIFT;
1681 
1682 	while (!list_empty(&async_chunk->extents)) {
1683 		async_extent = list_first_entry(&async_chunk->extents,
1684 						struct async_extent, list);
1685 		list_del(&async_extent->list);
1686 		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
1687 	}
1688 
1689 	/* atomic_sub_return implies a barrier */
1690 	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1691 	    5 * SZ_1M)
1692 		cond_wake_up_nomb(&fs_info->async_submit_wait);
1693 }
1694 
run_delalloc_compressed(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,struct writeback_control * wbc)1695 static bool run_delalloc_compressed(struct btrfs_inode *inode,
1696 				    struct folio *locked_folio, u64 start,
1697 				    u64 end, struct writeback_control *wbc)
1698 {
1699 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1700 	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1701 	struct async_cow *ctx;
1702 	struct async_chunk *async_chunk;
1703 	unsigned long nr_pages;
1704 	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1705 	int i;
1706 	unsigned nofs_flag;
1707 	const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1708 
1709 	nofs_flag = memalloc_nofs_save();
1710 	ctx = kvmalloc_flex(*ctx, chunks, num_chunks);
1711 	memalloc_nofs_restore(nofs_flag);
1712 	if (!ctx)
1713 		return false;
1714 
1715 	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
1716 
1717 	async_chunk = ctx->chunks;
1718 	atomic_set(&ctx->num_chunks, num_chunks);
1719 
1720 	for (i = 0; i < num_chunks; i++) {
1721 		u64 cur_end = min(end, start + SZ_512K - 1);
1722 
1723 		/*
1724 		 * igrab is called higher up in the call chain, take only the
1725 		 * lightweight reference for the callback lifetime
1726 		 */
1727 		ihold(&inode->vfs_inode);
1728 		async_chunk[i].async_cow = ctx;
1729 		async_chunk[i].inode = inode;
1730 		async_chunk[i].start = start;
1731 		async_chunk[i].end = cur_end;
1732 		async_chunk[i].write_flags = write_flags;
1733 		INIT_LIST_HEAD(&async_chunk[i].extents);
1734 
1735 		/*
1736 		 * The locked_folio comes all the way from writepage and its
1737 		 * the original folio we were actually given.  As we spread
1738 		 * this large delalloc region across multiple async_chunk
1739 		 * structs, only the first struct needs a pointer to
1740 		 * locked_folio.
1741 		 *
1742 		 * This way we don't need racey decisions about who is supposed
1743 		 * to unlock it.
1744 		 */
1745 		if (locked_folio) {
1746 			/*
1747 			 * Depending on the compressibility, the pages might or
1748 			 * might not go through async.  We want all of them to
1749 			 * be accounted against wbc once.  Let's do it here
1750 			 * before the paths diverge.  wbc accounting is used
1751 			 * only for foreign writeback detection and doesn't
1752 			 * need full accuracy.  Just account the whole thing
1753 			 * against the first page.
1754 			 */
1755 			wbc_account_cgroup_owner(wbc, locked_folio,
1756 						 cur_end - start);
1757 			async_chunk[i].locked_folio = locked_folio;
1758 			locked_folio = NULL;
1759 		} else {
1760 			async_chunk[i].locked_folio = NULL;
1761 		}
1762 
1763 		if (blkcg_css != blkcg_root_css) {
1764 			css_get(blkcg_css);
1765 			async_chunk[i].blkcg_css = blkcg_css;
1766 			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
1767 		} else {
1768 			async_chunk[i].blkcg_css = NULL;
1769 		}
1770 
1771 		btrfs_init_work(&async_chunk[i].work, compress_file_range,
1772 				submit_compressed_extents);
1773 
1774 		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1775 		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1776 
1777 		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1778 
1779 		start = cur_end + 1;
1780 	}
1781 	return true;
1782 }
1783 
1784 /*
1785  * Run the delalloc range from start to end, and write back any dirty pages
1786  * covered by the range.
1787  */
run_delalloc_cow(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,struct writeback_control * wbc,bool pages_dirty)1788 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
1789 				     struct folio *locked_folio, u64 start,
1790 				     u64 end, struct writeback_control *wbc,
1791 				     bool pages_dirty)
1792 {
1793 	u64 done_offset = end;
1794 	int ret;
1795 
1796 	while (start <= end) {
1797 		ret = cow_file_range(inode, locked_folio, start, end,
1798 				     &done_offset, COW_FILE_RANGE_KEEP_LOCKED);
1799 		if (ret)
1800 			return ret;
1801 		extent_write_locked_range(&inode->vfs_inode, locked_folio,
1802 					  start, done_offset, wbc, pages_dirty);
1803 		start = done_offset + 1;
1804 	}
1805 
1806 	return 1;
1807 }
1808 
fallback_to_cow(struct btrfs_inode * inode,struct folio * locked_folio,const u64 start,const u64 end)1809 static int fallback_to_cow(struct btrfs_inode *inode,
1810 			   struct folio *locked_folio, const u64 start,
1811 			   const u64 end)
1812 {
1813 	const bool is_space_ino = btrfs_is_free_space_inode(inode);
1814 	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1815 	const u64 range_bytes = end + 1 - start;
1816 	struct extent_io_tree *io_tree = &inode->io_tree;
1817 	struct extent_state *cached_state = NULL;
1818 	u64 range_start = start;
1819 	u64 count;
1820 	int ret;
1821 
1822 	/*
1823 	 * If EXTENT_NORESERVE is set it means that when the buffered write was
1824 	 * made we had not enough available data space and therefore we did not
1825 	 * reserve data space for it, since we though we could do NOCOW for the
1826 	 * respective file range (either there is prealloc extent or the inode
1827 	 * has the NOCOW bit set).
1828 	 *
1829 	 * However when we need to fallback to COW mode (because for example the
1830 	 * block group for the corresponding extent was turned to RO mode by a
1831 	 * scrub or relocation) we need to do the following:
1832 	 *
1833 	 * 1) We increment the bytes_may_use counter of the data space info.
1834 	 *    If COW succeeds, it allocates a new data extent and after doing
1835 	 *    that it decrements the space info's bytes_may_use counter and
1836 	 *    increments its bytes_reserved counter by the same amount (we do
1837 	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
1838 	 *    bytes_may_use counter to compensate (when space is reserved at
1839 	 *    buffered write time, the bytes_may_use counter is incremented);
1840 	 *
1841 	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1842 	 *    that if the COW path fails for any reason, it decrements (through
1843 	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1844 	 *    data space info, which we incremented in the step above.
1845 	 *
1846 	 * If we need to fallback to cow and the inode corresponds to a free
1847 	 * space cache inode or an inode of the data relocation tree, we must
1848 	 * also increment bytes_may_use of the data space_info for the same
1849 	 * reason. Space caches and relocated data extents always get a prealloc
1850 	 * extent for them, however scrub or balance may have set the block
1851 	 * group that contains that extent to RO mode and therefore force COW
1852 	 * when starting writeback.
1853 	 */
1854 	btrfs_lock_extent(io_tree, start, end, &cached_state);
1855 	count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes,
1856 				       EXTENT_NORESERVE, 0, NULL);
1857 	if (count > 0 || is_space_ino || is_reloc_ino) {
1858 		u64 bytes = count;
1859 		struct btrfs_fs_info *fs_info = inode->root->fs_info;
1860 		struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1861 
1862 		if (is_space_ino || is_reloc_ino)
1863 			bytes = range_bytes;
1864 
1865 		spin_lock(&sinfo->lock);
1866 		btrfs_space_info_update_bytes_may_use(sinfo, bytes);
1867 		spin_unlock(&sinfo->lock);
1868 
1869 		if (count > 0)
1870 			btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1871 					       &cached_state);
1872 	}
1873 	btrfs_unlock_extent(io_tree, start, end, &cached_state);
1874 
1875 	/*
1876 	 * Don't try to create inline extents, as a mix of inline extent that
1877 	 * is written out and unlocked directly and a normal NOCOW extent
1878 	 * doesn't work.
1879 	 *
1880 	 * And here we do not unlock the folio after a successful run.
1881 	 * The folios will be unlocked after everything is finished, or by error handling.
1882 	 *
1883 	 * This is to ensure error handling won't need to clear dirty/ordered flags without
1884 	 * a locked folio, which can race with writeback.
1885 	 */
1886 	ret = cow_file_range(inode, locked_folio, start, end, NULL,
1887 			     COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED);
1888 	ASSERT(ret != 1);
1889 	return ret;
1890 }
1891 
1892 struct can_nocow_file_extent_args {
1893 	/* Input fields. */
1894 
1895 	/* Start file offset of the range we want to NOCOW. */
1896 	u64 start;
1897 	/* End file offset (inclusive) of the range we want to NOCOW. */
1898 	u64 end;
1899 	bool writeback_path;
1900 	/*
1901 	 * Free the path passed to can_nocow_file_extent() once it's not needed
1902 	 * anymore.
1903 	 */
1904 	bool free_path;
1905 
1906 	/*
1907 	 * Output fields. Only set when can_nocow_file_extent() returns 1.
1908 	 * The expected file extent for the NOCOW write.
1909 	 */
1910 	struct btrfs_file_extent file_extent;
1911 };
1912 
1913 /*
1914  * Check if we can NOCOW the file extent that the path points to.
1915  * This function may return with the path released, so the caller should check
1916  * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1917  *
1918  * Returns: < 0 on error
1919  *            0 if we can not NOCOW
1920  *            1 if we can NOCOW
1921  */
can_nocow_file_extent(struct btrfs_path * path,struct btrfs_key * key,struct btrfs_inode * inode,struct can_nocow_file_extent_args * args)1922 static int can_nocow_file_extent(struct btrfs_path *path,
1923 				 struct btrfs_key *key,
1924 				 struct btrfs_inode *inode,
1925 				 struct can_nocow_file_extent_args *args)
1926 {
1927 	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1928 	struct extent_buffer *leaf = path->nodes[0];
1929 	struct btrfs_root *root = inode->root;
1930 	struct btrfs_file_extent_item *fi;
1931 	struct btrfs_root *csum_root;
1932 	u64 io_start;
1933 	u64 extent_end;
1934 	u8 extent_type;
1935 	int can_nocow = 0;
1936 	int ret = 0;
1937 	bool nowait = path->nowait;
1938 
1939 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1940 	extent_type = btrfs_file_extent_type(leaf, fi);
1941 
1942 	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1943 		goto out;
1944 
1945 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1946 	    extent_type == BTRFS_FILE_EXTENT_REG)
1947 		goto out;
1948 
1949 	/*
1950 	 * If the extent was created before the generation where the last snapshot
1951 	 * for its subvolume was created, then this implies the extent is shared,
1952 	 * hence we must COW.
1953 	 */
1954 	if (btrfs_file_extent_generation(leaf, fi) <=
1955 	    btrfs_root_last_snapshot(&root->root_item))
1956 		goto out;
1957 
1958 	/* An explicit hole, must COW. */
1959 	if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
1960 		goto out;
1961 
1962 	/* Compressed/encrypted/encoded extents must be COWed. */
1963 	if (btrfs_file_extent_compression(leaf, fi) ||
1964 	    btrfs_file_extent_encryption(leaf, fi) ||
1965 	    btrfs_file_extent_other_encoding(leaf, fi))
1966 		goto out;
1967 
1968 	extent_end = btrfs_file_extent_end(path);
1969 
1970 	args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1971 	args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1972 	args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1973 	args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
1974 	args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);
1975 
1976 	/*
1977 	 * The following checks can be expensive, as they need to take other
1978 	 * locks and do btree or rbtree searches, so release the path to avoid
1979 	 * blocking other tasks for too long.
1980 	 */
1981 	btrfs_release_path(path);
1982 
1983 	ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
1984 				    args->file_extent.disk_bytenr, path);
1985 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1986 	if (ret != 0)
1987 		goto out;
1988 
1989 	if (args->free_path) {
1990 		/*
1991 		 * We don't need the path anymore, plus through the
1992 		 * btrfs_lookup_csums_list() call below we will end up allocating
1993 		 * another path. So free the path to avoid unnecessary extra
1994 		 * memory usage.
1995 		 */
1996 		btrfs_free_path(path);
1997 		path = NULL;
1998 	}
1999 
2000 	/* If there are pending snapshots for this root, we must COW. */
2001 	if (args->writeback_path && !is_freespace_inode &&
2002 	    atomic_read(&root->snapshot_force_cow))
2003 		goto out;
2004 
2005 	args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
2006 	args->file_extent.offset += args->start - key->offset;
2007 	io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
2008 
2009 	/*
2010 	 * Force COW if csums exist in the range. This ensures that csums for a
2011 	 * given extent are either valid or do not exist.
2012 	 */
2013 
2014 	csum_root = btrfs_csum_root(root->fs_info, io_start);
2015 	ret = btrfs_lookup_csums_list(csum_root, io_start,
2016 				      io_start + args->file_extent.num_bytes - 1,
2017 				      NULL, nowait);
2018 	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
2019 	if (ret != 0)
2020 		goto out;
2021 
2022 	can_nocow = 1;
2023  out:
2024 	if (args->free_path && path)
2025 		btrfs_free_path(path);
2026 
2027 	return ret < 0 ? ret : can_nocow;
2028 }
2029 
nocow_one_range(struct btrfs_inode * inode,struct folio * locked_folio,struct extent_state ** cached,struct can_nocow_file_extent_args * nocow_args,u64 file_pos,bool is_prealloc)2030 static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
2031 			   struct extent_state **cached,
2032 			   struct can_nocow_file_extent_args *nocow_args,
2033 			   u64 file_pos, bool is_prealloc)
2034 {
2035 	struct btrfs_ordered_extent *ordered;
2036 	const u64 len = nocow_args->file_extent.num_bytes;
2037 	const u64 end = file_pos + len - 1;
2038 	int ret = 0;
2039 
2040 	btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
2041 
2042 	if (is_prealloc) {
2043 		struct extent_map *em;
2044 
2045 		em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
2046 					BTRFS_ORDERED_PREALLOC);
2047 		if (IS_ERR(em)) {
2048 			ret = PTR_ERR(em);
2049 			goto error;
2050 		}
2051 		btrfs_free_extent_map(em);
2052 	}
2053 
2054 	ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
2055 					     is_prealloc
2056 					     ? (1U << BTRFS_ORDERED_PREALLOC)
2057 					     : (1U << BTRFS_ORDERED_NOCOW));
2058 	if (IS_ERR(ordered)) {
2059 		if (is_prealloc)
2060 			btrfs_drop_extent_map_range(inode, file_pos, end, false);
2061 		ret = PTR_ERR(ordered);
2062 		goto error;
2063 	}
2064 
2065 	if (btrfs_is_data_reloc_root(inode->root))
2066 		/*
2067 		 * Errors are handled later, as we must prevent
2068 		 * extent_clear_unlock_delalloc() in error handler from freeing
2069 		 * metadata of the created ordered extent.
2070 		 */
2071 		ret = btrfs_reloc_clone_csums(ordered);
2072 	btrfs_put_ordered_extent(ordered);
2073 
2074 	if (ret < 0)
2075 		goto error;
2076 	extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
2077 				     EXTENT_LOCKED | EXTENT_DELALLOC |
2078 				     EXTENT_CLEAR_DATA_RESV,
2079 				     PAGE_SET_ORDERED);
2080 	return ret;
2081 
2082 error:
2083 	btrfs_cleanup_ordered_extents(inode, file_pos, len);
2084 	extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
2085 				     EXTENT_LOCKED | EXTENT_DELALLOC |
2086 				     EXTENT_CLEAR_DATA_RESV,
2087 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
2088 				     PAGE_END_WRITEBACK);
2089 	btrfs_err(inode->root->fs_info,
2090 		  "%s failed, root=%lld inode=%llu start=%llu len=%llu: %d",
2091 		  __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
2092 		  file_pos, len, ret);
2093 	return ret;
2094 }
2095 
2096 /*
2097  * When nocow writeback calls back.  This checks for snapshots or COW copies
2098  * of the extents that exist in the file, and COWs the file as required.
2099  *
2100  * If no cow copies or snapshots exist, we write directly to the existing
2101  * blocks on disk
2102  */
run_delalloc_nocow(struct btrfs_inode * inode,struct folio * locked_folio,const u64 start,const u64 end)2103 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
2104 				       struct folio *locked_folio,
2105 				       const u64 start, const u64 end)
2106 {
2107 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2108 	struct btrfs_root *root = inode->root;
2109 	struct btrfs_path *path = NULL;
2110 	u64 cow_start = (u64)-1;
2111 	/*
2112 	 * If not 0, represents the inclusive end of the last fallback_to_cow()
2113 	 * range. Only for error handling.
2114 	 *
2115 	 * The same for nocow_end, it's to avoid double cleaning up the range
2116 	 * already cleaned by nocow_one_range().
2117 	 */
2118 	u64 cow_end = 0;
2119 	u64 nocow_end = 0;
2120 	u64 cur_offset = start;
2121 	int ret;
2122 	bool check_prev = true;
2123 	u64 ino = btrfs_ino(inode);
2124 	struct can_nocow_file_extent_args nocow_args = { 0 };
2125 	/* The range that has ordered extent(s). */
2126 	u64 oe_cleanup_start;
2127 	u64 oe_cleanup_len = 0;
2128 	/* The range that is untouched. */
2129 	u64 untouched_start;
2130 	u64 untouched_len = 0;
2131 
2132 	/*
2133 	 * Normally on a zoned device we're only doing COW writes, but in case
2134 	 * of relocation on a zoned filesystem serializes I/O so that we're only
2135 	 * writing sequentially and can end up here as well.
2136 	 */
2137 	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
2138 
2139 	if (btrfs_is_shutdown(fs_info)) {
2140 		ret = -EIO;
2141 		goto error;
2142 	}
2143 	path = btrfs_alloc_path();
2144 	if (!path) {
2145 		ret = -ENOMEM;
2146 		goto error;
2147 	}
2148 
2149 	nocow_args.end = end;
2150 	nocow_args.writeback_path = true;
2151 
2152 	while (cur_offset <= end) {
2153 		struct btrfs_block_group *nocow_bg = NULL;
2154 		struct btrfs_key found_key;
2155 		struct btrfs_file_extent_item *fi;
2156 		struct extent_buffer *leaf;
2157 		struct extent_state *cached_state = NULL;
2158 		u64 extent_end;
2159 		int extent_type;
2160 
2161 		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2162 					       cur_offset, 0);
2163 		if (ret < 0)
2164 			goto error;
2165 
2166 		/*
2167 		 * If there is no extent for our range when doing the initial
2168 		 * search, then go back to the previous slot as it will be the
2169 		 * one containing the search offset
2170 		 */
2171 		if (ret > 0 && path->slots[0] > 0 && check_prev) {
2172 			leaf = path->nodes[0];
2173 			btrfs_item_key_to_cpu(leaf, &found_key,
2174 					      path->slots[0] - 1);
2175 			if (found_key.objectid == ino &&
2176 			    found_key.type == BTRFS_EXTENT_DATA_KEY)
2177 				path->slots[0]--;
2178 		}
2179 		check_prev = false;
2180 next_slot:
2181 		/* Go to next leaf if we have exhausted the current one */
2182 		leaf = path->nodes[0];
2183 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2184 			ret = btrfs_next_leaf(root, path);
2185 			if (ret < 0)
2186 				goto error;
2187 			if (ret > 0)
2188 				break;
2189 			leaf = path->nodes[0];
2190 		}
2191 
2192 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2193 
2194 		/* Didn't find anything for our INO */
2195 		if (found_key.objectid > ino)
2196 			break;
2197 		/*
2198 		 * Keep searching until we find an EXTENT_ITEM or there are no
2199 		 * more extents for this inode
2200 		 */
2201 		if (WARN_ON_ONCE(found_key.objectid < ino) ||
2202 		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
2203 			path->slots[0]++;
2204 			goto next_slot;
2205 		}
2206 
2207 		/* Found key is not EXTENT_DATA_KEY or starts after req range */
2208 		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2209 		    found_key.offset > end)
2210 			break;
2211 
2212 		/*
2213 		 * If the found extent starts after requested offset, then
2214 		 * adjust cur_offset to be right before this extent begins.
2215 		 */
2216 		if (found_key.offset > cur_offset) {
2217 			if (cow_start == (u64)-1)
2218 				cow_start = cur_offset;
2219 			cur_offset = found_key.offset;
2220 			goto next_slot;
2221 		}
2222 
2223 		/*
2224 		 * Found extent which begins before our range and potentially
2225 		 * intersect it
2226 		 */
2227 		fi = btrfs_item_ptr(leaf, path->slots[0],
2228 				    struct btrfs_file_extent_item);
2229 		extent_type = btrfs_file_extent_type(leaf, fi);
2230 		/* If this is triggered then we have a memory corruption. */
2231 		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2232 		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2233 			ret = -EUCLEAN;
2234 			goto error;
2235 		}
2236 		extent_end = btrfs_file_extent_end(path);
2237 
2238 		/*
2239 		 * If the extent we got ends before our current offset, skip to
2240 		 * the next extent.
2241 		 */
2242 		if (extent_end <= cur_offset) {
2243 			path->slots[0]++;
2244 			goto next_slot;
2245 		}
2246 
2247 		nocow_args.start = cur_offset;
2248 		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2249 		if (ret < 0)
2250 			goto error;
2251 		if (ret == 0)
2252 			goto must_cow;
2253 
2254 		ret = 0;
2255 		nocow_bg = btrfs_inc_nocow_writers(fs_info,
2256 				nocow_args.file_extent.disk_bytenr +
2257 				nocow_args.file_extent.offset);
2258 		if (!nocow_bg) {
2259 must_cow:
2260 			/*
2261 			 * If we can't perform NOCOW writeback for the range,
2262 			 * then record the beginning of the range that needs to
2263 			 * be COWed.  It will be written out before the next
2264 			 * NOCOW range if we find one, or when exiting this
2265 			 * loop.
2266 			 */
2267 			if (cow_start == (u64)-1)
2268 				cow_start = cur_offset;
2269 			cur_offset = extent_end;
2270 			if (cur_offset > end)
2271 				break;
2272 			if (!path->nodes[0])
2273 				continue;
2274 			path->slots[0]++;
2275 			goto next_slot;
2276 		}
2277 
2278 		/*
2279 		 * COW range from cow_start to found_key.offset - 1. As the key
2280 		 * will contain the beginning of the first extent that can be
2281 		 * NOCOW, following one which needs to be COW'ed
2282 		 */
2283 		if (cow_start != (u64)-1) {
2284 			ret = fallback_to_cow(inode, locked_folio, cow_start,
2285 					      found_key.offset - 1);
2286 			if (ret) {
2287 				cow_end = found_key.offset - 1;
2288 				btrfs_dec_nocow_writers(nocow_bg);
2289 				goto error;
2290 			}
2291 			cow_start = (u64)-1;
2292 		}
2293 
2294 		ret = nocow_one_range(inode, locked_folio, &cached_state,
2295 				      &nocow_args, cur_offset,
2296 				      extent_type == BTRFS_FILE_EXTENT_PREALLOC);
2297 		btrfs_dec_nocow_writers(nocow_bg);
2298 		if (ret < 0) {
2299 			nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
2300 			goto error;
2301 		}
2302 		cur_offset = extent_end;
2303 	}
2304 	btrfs_release_path(path);
2305 
2306 	if (cur_offset <= end && cow_start == (u64)-1)
2307 		cow_start = cur_offset;
2308 
2309 	if (cow_start != (u64)-1) {
2310 		ret = fallback_to_cow(inode, locked_folio, cow_start, end);
2311 		if (ret) {
2312 			cow_end = end;
2313 			goto error;
2314 		}
2315 		cow_start = (u64)-1;
2316 	}
2317 
2318 	/*
2319 	 * Everything is finished without an error, can unlock the folios now.
2320 	 *
2321 	 * No need to touch the io tree range nor set folio ordered flag, as
2322 	 * fallback_to_cow() and nocow_one_range() have already handled them.
2323 	 */
2324 	extent_clear_unlock_delalloc(inode, start, end, locked_folio, NULL, 0, PAGE_UNLOCK);
2325 
2326 	btrfs_free_path(path);
2327 	return 0;
2328 
2329 error:
2330 	if (cow_start == (u64)-1) {
2331 		/*
2332 		 * case a)
2333 		 *    start           cur_offset               end
2334 		 *    |   OE cleanup  |       Untouched        |
2335 		 *
2336 		 * We finished a fallback_to_cow() or nocow_one_range() call,
2337 		 * but failed to check the next range.
2338 		 *
2339 		 * or
2340 		 *    start           cur_offset   nocow_end   end
2341 		 *    |   OE cleanup  |   Skip     | Untouched |
2342 		 *
2343 		 * nocow_one_range() failed, the range [cur_offset, nocow_end] is
2344 		 * already cleaned up.
2345 		 */
2346 		oe_cleanup_start = start;
2347 		oe_cleanup_len = cur_offset - start;
2348 		if (nocow_end)
2349 			untouched_start = nocow_end + 1;
2350 		else
2351 			untouched_start = cur_offset;
2352 		untouched_len = end + 1 - untouched_start;
2353 	} else if (cow_start != (u64)-1 && cow_end == 0) {
2354 		/*
2355 		 * case b)
2356 		 *    start        cow_start    cur_offset   end
2357 		 *    | OE cleanup |        Untouched        |
2358 		 *
2359 		 * We got a range that needs COW, but before we hit the next NOCOW range,
2360 		 * thus [cow_start, cur_offset) doesn't yet have any OE.
2361 		 */
2362 		oe_cleanup_start = start;
2363 		oe_cleanup_len = cow_start - start;
2364 		untouched_start = cow_start;
2365 		untouched_len = end + 1 - untouched_start;
2366 	} else {
2367 		/*
2368 		 * case c)
2369 		 *    start        cow_start    cow_end      end
2370 		 *    | OE cleanup |   Skip     |  Untouched |
2371 		 *
2372 		 * fallback_to_cow() failed, and fallback_to_cow() will do the
2373 		 * cleanup for its range, we shouldn't touch the range
2374 		 * [cow_start, cow_end].
2375 		 */
2376 		ASSERT(cow_start != (u64)-1 && cow_end != 0);
2377 		oe_cleanup_start = start;
2378 		oe_cleanup_len = cow_start - start;
2379 		untouched_start = cow_end + 1;
2380 		untouched_len = end + 1 - untouched_start;
2381 	}
2382 
2383 	if (oe_cleanup_len) {
2384 		const u64 oe_cleanup_end = oe_cleanup_start + oe_cleanup_len - 1;
2385 		btrfs_cleanup_ordered_extents(inode, oe_cleanup_start, oe_cleanup_len);
2386 		extent_clear_unlock_delalloc(inode, oe_cleanup_start, oe_cleanup_end,
2387 					     locked_folio, NULL,
2388 					     EXTENT_LOCKED | EXTENT_DELALLOC,
2389 					     PAGE_UNLOCK | PAGE_START_WRITEBACK |
2390 					     PAGE_END_WRITEBACK);
2391 	}
2392 
2393 	if (untouched_len) {
2394 		struct extent_state *cached = NULL;
2395 		const u64 untouched_end = untouched_start + untouched_len - 1;
2396 
2397 		/*
2398 		 * We need to lock the extent here because we're clearing DELALLOC and
2399 		 * we're not locked at this point.
2400 		 */
2401 		btrfs_lock_extent(&inode->io_tree, untouched_start, untouched_end, &cached);
2402 		extent_clear_unlock_delalloc(inode, untouched_start, untouched_end,
2403 					     locked_folio, &cached,
2404 					     EXTENT_LOCKED | EXTENT_DELALLOC |
2405 					     EXTENT_DEFRAG |
2406 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2407 					     PAGE_START_WRITEBACK |
2408 					     PAGE_END_WRITEBACK);
2409 		btrfs_qgroup_free_data(inode, NULL, untouched_start, untouched_len, NULL);
2410 	}
2411 	btrfs_free_path(path);
2412 	btrfs_err(fs_info,
2413 "%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu oe_cleanup=%llu oe_cleanup_len=%llu untouched_start=%llu untouched_len=%llu: %d",
2414 		  __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
2415 		  start, end + 1 - start, cur_offset, oe_cleanup_start, oe_cleanup_len,
2416 		  untouched_start, untouched_len, ret);
2417 	return ret;
2418 }
2419 
should_nocow(struct btrfs_inode * inode,u64 start,u64 end)2420 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2421 {
2422 	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2423 		if (inode->defrag_bytes &&
2424 		    btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
2425 			return false;
2426 		return true;
2427 	}
2428 	return false;
2429 }
2430 
2431 /*
2432  * Function to process delayed allocation (create CoW) for ranges which are
2433  * being touched for the first time.
2434  */
btrfs_run_delalloc_range(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,struct writeback_control * wbc)2435 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
2436 			     u64 start, u64 end, struct writeback_control *wbc)
2437 {
2438 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2439 
2440 	/*
2441 	 * The range must cover part of the @locked_folio, or a return of 1
2442 	 * can confuse the caller.
2443 	 */
2444 	ASSERT(!(end <= folio_pos(locked_folio) ||
2445 		 start >= folio_next_pos(locked_folio)));
2446 
2447 	if (should_nocow(inode, start, end))
2448 		return run_delalloc_nocow(inode, locked_folio, start, end);
2449 
2450 	if (btrfs_inode_can_compress(inode) &&
2451 	    inode_need_compress(inode, start, end) &&
2452 	    run_delalloc_compressed(inode, locked_folio, start, end, wbc))
2453 		return 1;
2454 
2455 	if (zoned)
2456 		return run_delalloc_cow(inode, locked_folio, start, end, wbc, true);
2457 	else
2458 		return cow_file_range(inode, locked_folio, start, end, NULL, 0);
2459 }
2460 
btrfs_split_delalloc_extent(struct btrfs_inode * inode,struct extent_state * orig,u64 split)2461 void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2462 				 struct extent_state *orig, u64 split)
2463 {
2464 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2465 	u64 size;
2466 
2467 	lockdep_assert_held(&inode->io_tree.lock);
2468 
2469 	/* not delalloc, ignore it */
2470 	if (!(orig->state & EXTENT_DELALLOC))
2471 		return;
2472 
2473 	size = orig->end - orig->start + 1;
2474 	if (size > fs_info->max_extent_size) {
2475 		u32 num_extents;
2476 		u64 new_size;
2477 
2478 		/*
2479 		 * See the explanation in btrfs_merge_delalloc_extent, the same
2480 		 * applies here, just in reverse.
2481 		 */
2482 		new_size = orig->end - split + 1;
2483 		num_extents = count_max_extents(fs_info, new_size);
2484 		new_size = split - orig->start;
2485 		num_extents += count_max_extents(fs_info, new_size);
2486 		if (count_max_extents(fs_info, size) >= num_extents)
2487 			return;
2488 	}
2489 
2490 	spin_lock(&inode->lock);
2491 	btrfs_mod_outstanding_extents(inode, 1);
2492 	spin_unlock(&inode->lock);
2493 }
2494 
2495 /*
2496  * Handle merged delayed allocation extents so we can keep track of new extents
2497  * that are just merged onto old extents, such as when we are doing sequential
2498  * writes, so we can properly account for the metadata space we'll need.
2499  */
btrfs_merge_delalloc_extent(struct btrfs_inode * inode,struct extent_state * new,struct extent_state * other)2500 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2501 				 struct extent_state *other)
2502 {
2503 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2504 	u64 new_size, old_size;
2505 	u32 num_extents;
2506 
2507 	lockdep_assert_held(&inode->io_tree.lock);
2508 
2509 	/* not delalloc, ignore it */
2510 	if (!(other->state & EXTENT_DELALLOC))
2511 		return;
2512 
2513 	if (new->start > other->start)
2514 		new_size = new->end - other->start + 1;
2515 	else
2516 		new_size = other->end - new->start + 1;
2517 
2518 	/* we're not bigger than the max, unreserve the space and go */
2519 	if (new_size <= fs_info->max_extent_size) {
2520 		spin_lock(&inode->lock);
2521 		btrfs_mod_outstanding_extents(inode, -1);
2522 		spin_unlock(&inode->lock);
2523 		return;
2524 	}
2525 
2526 	/*
2527 	 * We have to add up either side to figure out how many extents were
2528 	 * accounted for before we merged into one big extent.  If the number of
2529 	 * extents we accounted for is <= the amount we need for the new range
2530 	 * then we can return, otherwise drop.  Think of it like this
2531 	 *
2532 	 * [ 4k][MAX_SIZE]
2533 	 *
2534 	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2535 	 * need 2 outstanding extents, on one side we have 1 and the other side
2536 	 * we have 1 so they are == and we can return.  But in this case
2537 	 *
2538 	 * [MAX_SIZE+4k][MAX_SIZE+4k]
2539 	 *
2540 	 * Each range on their own accounts for 2 extents, but merged together
2541 	 * they are only 3 extents worth of accounting, so we need to drop in
2542 	 * this case.
2543 	 */
2544 	old_size = other->end - other->start + 1;
2545 	num_extents = count_max_extents(fs_info, old_size);
2546 	old_size = new->end - new->start + 1;
2547 	num_extents += count_max_extents(fs_info, old_size);
2548 	if (count_max_extents(fs_info, new_size) >= num_extents)
2549 		return;
2550 
2551 	spin_lock(&inode->lock);
2552 	btrfs_mod_outstanding_extents(inode, -1);
2553 	spin_unlock(&inode->lock);
2554 }
2555 
btrfs_add_delalloc_inode(struct btrfs_inode * inode)2556 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
2557 {
2558 	struct btrfs_root *root = inode->root;
2559 	struct btrfs_fs_info *fs_info = root->fs_info;
2560 
2561 	spin_lock(&root->delalloc_lock);
2562 	ASSERT(list_empty(&inode->delalloc_inodes));
2563 	list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2564 	root->nr_delalloc_inodes++;
2565 	if (root->nr_delalloc_inodes == 1) {
2566 		spin_lock(&fs_info->delalloc_root_lock);
2567 		ASSERT(list_empty(&root->delalloc_root));
2568 		list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
2569 		spin_unlock(&fs_info->delalloc_root_lock);
2570 	}
2571 	spin_unlock(&root->delalloc_lock);
2572 }
2573 
btrfs_del_delalloc_inode(struct btrfs_inode * inode)2574 void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
2575 {
2576 	struct btrfs_root *root = inode->root;
2577 	struct btrfs_fs_info *fs_info = root->fs_info;
2578 
2579 	lockdep_assert_held(&root->delalloc_lock);
2580 
2581 	/*
2582 	 * We may be called after the inode was already deleted from the list,
2583 	 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
2584 	 * and then later through btrfs_clear_delalloc_extent() while the inode
2585 	 * still has ->delalloc_bytes > 0.
2586 	 */
2587 	if (!list_empty(&inode->delalloc_inodes)) {
2588 		list_del_init(&inode->delalloc_inodes);
2589 		root->nr_delalloc_inodes--;
2590 		if (!root->nr_delalloc_inodes) {
2591 			ASSERT(list_empty(&root->delalloc_inodes));
2592 			spin_lock(&fs_info->delalloc_root_lock);
2593 			ASSERT(!list_empty(&root->delalloc_root));
2594 			list_del_init(&root->delalloc_root);
2595 			spin_unlock(&fs_info->delalloc_root_lock);
2596 		}
2597 	}
2598 }
2599 
2600 /*
2601  * Properly track delayed allocation bytes in the inode and to maintain the
2602  * list of inodes that have pending delalloc work to be done.
2603  */
btrfs_set_delalloc_extent(struct btrfs_inode * inode,struct extent_state * state,u32 bits)2604 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2605 			       u32 bits)
2606 {
2607 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2608 
2609 	lockdep_assert_held(&inode->io_tree.lock);
2610 
2611 	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2612 		WARN_ON(1);
2613 	/*
2614 	 * set_bit and clear bit hooks normally require _irqsave/restore
2615 	 * but in this case, we are only testing for the DELALLOC
2616 	 * bit, which is only set or cleared with irqs on
2617 	 */
2618 	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2619 		u64 len = state->end + 1 - state->start;
2620 		u64 prev_delalloc_bytes;
2621 		u32 num_extents = count_max_extents(fs_info, len);
2622 
2623 		spin_lock(&inode->lock);
2624 		btrfs_mod_outstanding_extents(inode, num_extents);
2625 		spin_unlock(&inode->lock);
2626 
2627 		/* For sanity tests */
2628 		if (btrfs_is_testing(fs_info))
2629 			return;
2630 
2631 		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2632 					 fs_info->delalloc_batch);
2633 		spin_lock(&inode->lock);
2634 		prev_delalloc_bytes = inode->delalloc_bytes;
2635 		inode->delalloc_bytes += len;
2636 		if (bits & EXTENT_DEFRAG)
2637 			inode->defrag_bytes += len;
2638 		spin_unlock(&inode->lock);
2639 
2640 		/*
2641 		 * We don't need to be under the protection of the inode's lock,
2642 		 * because we are called while holding the inode's io_tree lock
2643 		 * and are therefore protected against concurrent calls of this
2644 		 * function and btrfs_clear_delalloc_extent().
2645 		 */
2646 		if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
2647 			btrfs_add_delalloc_inode(inode);
2648 	}
2649 
2650 	if (!(state->state & EXTENT_DELALLOC_NEW) &&
2651 	    (bits & EXTENT_DELALLOC_NEW)) {
2652 		spin_lock(&inode->lock);
2653 		inode->new_delalloc_bytes += state->end + 1 - state->start;
2654 		spin_unlock(&inode->lock);
2655 	}
2656 }
2657 
2658 /*
2659  * Once a range is no longer delalloc this function ensures that proper
2660  * accounting happens.
2661  */
btrfs_clear_delalloc_extent(struct btrfs_inode * inode,struct extent_state * state,u32 bits)2662 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2663 				 struct extent_state *state, u32 bits)
2664 {
2665 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2666 	u64 len = state->end + 1 - state->start;
2667 	u32 num_extents = count_max_extents(fs_info, len);
2668 
2669 	lockdep_assert_held(&inode->io_tree.lock);
2670 
2671 	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2672 		spin_lock(&inode->lock);
2673 		inode->defrag_bytes -= len;
2674 		spin_unlock(&inode->lock);
2675 	}
2676 
2677 	/*
2678 	 * set_bit and clear bit hooks normally require _irqsave/restore
2679 	 * but in this case, we are only testing for the DELALLOC
2680 	 * bit, which is only set or cleared with irqs on
2681 	 */
2682 	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2683 		struct btrfs_root *root = inode->root;
2684 		u64 new_delalloc_bytes;
2685 
2686 		spin_lock(&inode->lock);
2687 		btrfs_mod_outstanding_extents(inode, -num_extents);
2688 		spin_unlock(&inode->lock);
2689 
2690 		/*
2691 		 * We don't reserve metadata space for space cache inodes so we
2692 		 * don't need to call delalloc_release_metadata if there is an
2693 		 * error.
2694 		 */
2695 		if (bits & EXTENT_CLEAR_META_RESV &&
2696 		    root != fs_info->tree_root)
2697 			btrfs_delalloc_release_metadata(inode, len, true);
2698 
2699 		/* For sanity tests. */
2700 		if (btrfs_is_testing(fs_info))
2701 			return;
2702 
2703 		if (!btrfs_is_data_reloc_root(root) &&
2704 		    !btrfs_is_free_space_inode(inode) &&
2705 		    !(state->state & EXTENT_NORESERVE) &&
2706 		    (bits & EXTENT_CLEAR_DATA_RESV))
2707 			btrfs_free_reserved_data_space_noquota(inode, len);
2708 
2709 		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2710 					 fs_info->delalloc_batch);
2711 		spin_lock(&inode->lock);
2712 		inode->delalloc_bytes -= len;
2713 		new_delalloc_bytes = inode->delalloc_bytes;
2714 		spin_unlock(&inode->lock);
2715 
2716 		/*
2717 		 * We don't need to be under the protection of the inode's lock,
2718 		 * because we are called while holding the inode's io_tree lock
2719 		 * and are therefore protected against concurrent calls of this
2720 		 * function and btrfs_set_delalloc_extent().
2721 		 */
2722 		if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
2723 			spin_lock(&root->delalloc_lock);
2724 			btrfs_del_delalloc_inode(inode);
2725 			spin_unlock(&root->delalloc_lock);
2726 		}
2727 	}
2728 
2729 	if ((state->state & EXTENT_DELALLOC_NEW) &&
2730 	    (bits & EXTENT_DELALLOC_NEW)) {
2731 		spin_lock(&inode->lock);
2732 		ASSERT(inode->new_delalloc_bytes >= len);
2733 		inode->new_delalloc_bytes -= len;
2734 		if (bits & EXTENT_ADD_INODE_BYTES)
2735 			inode_add_bytes(&inode->vfs_inode, len);
2736 		spin_unlock(&inode->lock);
2737 	}
2738 }
2739 
2740 /*
2741  * given a list of ordered sums record them in the inode.  This happens
2742  * at IO completion time based on sums calculated at bio submission time.
2743  */
add_pending_csums(struct btrfs_trans_handle * trans,struct list_head * list)2744 static int add_pending_csums(struct btrfs_trans_handle *trans,
2745 			     struct list_head *list)
2746 {
2747 	struct btrfs_ordered_sum *sum;
2748 	struct btrfs_root *csum_root = NULL;
2749 	int ret;
2750 
2751 	list_for_each_entry(sum, list, list) {
2752 		trans->adding_csums = true;
2753 		if (!csum_root)
2754 			csum_root = btrfs_csum_root(trans->fs_info,
2755 						    sum->logical);
2756 		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2757 		trans->adding_csums = false;
2758 		if (ret)
2759 			return ret;
2760 	}
2761 	return 0;
2762 }
2763 
btrfs_find_new_delalloc_bytes(struct btrfs_inode * inode,const u64 start,const u64 len,struct extent_state ** cached_state)2764 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2765 					 const u64 start,
2766 					 const u64 len,
2767 					 struct extent_state **cached_state)
2768 {
2769 	u64 search_start = start;
2770 	const u64 end = start + len - 1;
2771 
2772 	while (search_start < end) {
2773 		const u64 search_len = end - search_start + 1;
2774 		struct extent_map *em;
2775 		u64 em_len;
2776 		int ret = 0;
2777 
2778 		em = btrfs_get_extent(inode, NULL, search_start, search_len);
2779 		if (IS_ERR(em))
2780 			return PTR_ERR(em);
2781 
2782 		if (em->disk_bytenr != EXTENT_MAP_HOLE)
2783 			goto next;
2784 
2785 		em_len = em->len;
2786 		if (em->start < search_start)
2787 			em_len -= search_start - em->start;
2788 		if (em_len > search_len)
2789 			em_len = search_len;
2790 
2791 		ret = btrfs_set_extent_bit(&inode->io_tree, search_start,
2792 					   search_start + em_len - 1,
2793 					   EXTENT_DELALLOC_NEW, cached_state);
2794 next:
2795 		search_start = btrfs_extent_map_end(em);
2796 		btrfs_free_extent_map(em);
2797 		if (ret)
2798 			return ret;
2799 	}
2800 	return 0;
2801 }
2802 
btrfs_set_extent_delalloc(struct btrfs_inode * inode,u64 start,u64 end,unsigned int extra_bits,struct extent_state ** cached_state)2803 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2804 			      unsigned int extra_bits,
2805 			      struct extent_state **cached_state)
2806 {
2807 	WARN_ON(PAGE_ALIGNED(end));
2808 
2809 	if (start >= i_size_read(&inode->vfs_inode) &&
2810 	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
2811 		/*
2812 		 * There can't be any extents following eof in this case so just
2813 		 * set the delalloc new bit for the range directly.
2814 		 */
2815 		extra_bits |= EXTENT_DELALLOC_NEW;
2816 	} else {
2817 		int ret;
2818 
2819 		ret = btrfs_find_new_delalloc_bytes(inode, start,
2820 						    end + 1 - start,
2821 						    cached_state);
2822 		if (ret)
2823 			return ret;
2824 	}
2825 
2826 	return btrfs_set_extent_bit(&inode->io_tree, start, end,
2827 				    EXTENT_DELALLOC | extra_bits, cached_state);
2828 }
2829 
2830 /* see btrfs_writepage_start_hook for details on why this is required */
2831 struct btrfs_writepage_fixup {
2832 	struct folio *folio;
2833 	struct btrfs_inode *inode;
2834 	struct btrfs_work work;
2835 };
2836 
btrfs_writepage_fixup_worker(struct btrfs_work * work)2837 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2838 {
2839 	struct btrfs_writepage_fixup *fixup =
2840 		container_of(work, struct btrfs_writepage_fixup, work);
2841 	struct btrfs_ordered_extent *ordered;
2842 	struct extent_state *cached_state = NULL;
2843 	struct extent_changeset *data_reserved = NULL;
2844 	struct folio *folio = fixup->folio;
2845 	struct btrfs_inode *inode = fixup->inode;
2846 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2847 	u64 page_start = folio_pos(folio);
2848 	u64 page_end = folio_next_pos(folio) - 1;
2849 	int ret = 0;
2850 	bool free_delalloc_space = true;
2851 
2852 	/*
2853 	 * This is similar to page_mkwrite, we need to reserve the space before
2854 	 * we take the folio lock.
2855 	 */
2856 	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2857 					   folio_size(folio));
2858 again:
2859 	folio_lock(folio);
2860 
2861 	/*
2862 	 * Before we queued this fixup, we took a reference on the folio.
2863 	 * folio->mapping may go NULL, but it shouldn't be moved to a different
2864 	 * address space.
2865 	 */
2866 	if (!folio->mapping || !folio_test_dirty(folio) ||
2867 	    !folio_test_checked(folio)) {
2868 		/*
2869 		 * Unfortunately this is a little tricky, either
2870 		 *
2871 		 * 1) We got here and our folio had already been dealt with and
2872 		 *    we reserved our space, thus ret == 0, so we need to just
2873 		 *    drop our space reservation and bail.  This can happen the
2874 		 *    first time we come into the fixup worker, or could happen
2875 		 *    while waiting for the ordered extent.
2876 		 * 2) Our folio was already dealt with, but we happened to get an
2877 		 *    ENOSPC above from the btrfs_delalloc_reserve_space.  In
2878 		 *    this case we obviously don't have anything to release, but
2879 		 *    because the folio was already dealt with we don't want to
2880 		 *    mark the folio with an error, so make sure we're resetting
2881 		 *    ret to 0.  This is why we have this check _before_ the ret
2882 		 *    check, because we do not want to have a surprise ENOSPC
2883 		 *    when the folio was already properly dealt with.
2884 		 */
2885 		if (!ret) {
2886 			btrfs_delalloc_release_extents(inode, folio_size(folio));
2887 			btrfs_delalloc_release_space(inode, data_reserved,
2888 						     page_start, folio_size(folio),
2889 						     true);
2890 		}
2891 		ret = 0;
2892 		goto out_page;
2893 	}
2894 
2895 	/*
2896 	 * We can't mess with the folio state unless it is locked, so now that
2897 	 * it is locked bail if we failed to make our space reservation.
2898 	 */
2899 	if (ret)
2900 		goto out_page;
2901 
2902 	btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2903 
2904 	/* already ordered? We're done */
2905 	if (folio_test_ordered(folio))
2906 		goto out_reserved;
2907 
2908 	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2909 	if (ordered) {
2910 		btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
2911 				    &cached_state);
2912 		folio_unlock(folio);
2913 		btrfs_start_ordered_extent(ordered);
2914 		btrfs_put_ordered_extent(ordered);
2915 		goto again;
2916 	}
2917 
2918 	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2919 					&cached_state);
2920 	if (ret)
2921 		goto out_reserved;
2922 
2923 	/*
2924 	 * Everything went as planned, we're now the owner of a dirty page with
2925 	 * delayed allocation bits set and space reserved for our COW
2926 	 * destination.
2927 	 *
2928 	 * The page was dirty when we started, nothing should have cleaned it.
2929 	 */
2930 	BUG_ON(!folio_test_dirty(folio));
2931 	free_delalloc_space = false;
2932 out_reserved:
2933 	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2934 	if (free_delalloc_space)
2935 		btrfs_delalloc_release_space(inode, data_reserved, page_start,
2936 					     PAGE_SIZE, true);
2937 	btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2938 out_page:
2939 	if (ret) {
2940 		/*
2941 		 * We hit ENOSPC or other errors.  Update the mapping and page
2942 		 * to reflect the errors and clean the page.
2943 		 */
2944 		mapping_set_error(folio->mapping, ret);
2945 		btrfs_mark_ordered_io_finished(inode, folio, page_start,
2946 					       folio_size(folio), !ret);
2947 		folio_clear_dirty_for_io(folio);
2948 	}
2949 	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
2950 	folio_unlock(folio);
2951 	folio_put(folio);
2952 	kfree(fixup);
2953 	extent_changeset_free(data_reserved);
2954 	/*
2955 	 * As a precaution, do a delayed iput in case it would be the last iput
2956 	 * that could need flushing space. Recursing back to fixup worker would
2957 	 * deadlock.
2958 	 */
2959 	btrfs_add_delayed_iput(inode);
2960 }
2961 
2962 /*
2963  * There are a few paths in the higher layers of the kernel that directly
2964  * set the folio dirty bit without asking the filesystem if it is a
2965  * good idea.  This causes problems because we want to make sure COW
2966  * properly happens and the data=ordered rules are followed.
2967  *
2968  * In our case any range that doesn't have the ORDERED bit set
2969  * hasn't been properly setup for IO.  We kick off an async process
2970  * to fix it up.  The async helper will wait for ordered extents, set
2971  * the delalloc bit and make it safe to write the folio.
2972  */
btrfs_writepage_cow_fixup(struct folio * folio)2973 int btrfs_writepage_cow_fixup(struct folio *folio)
2974 {
2975 	struct inode *inode = folio->mapping->host;
2976 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2977 	struct btrfs_writepage_fixup *fixup;
2978 
2979 	/* This folio has ordered extent covering it already */
2980 	if (folio_test_ordered(folio))
2981 		return 0;
2982 
2983 	/*
2984 	 * For experimental build, we error out instead of EAGAIN.
2985 	 *
2986 	 * We should not hit such out-of-band dirty folios anymore.
2987 	 */
2988 	if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
2989 		DEBUG_WARN();
2990 		btrfs_err_rl(fs_info,
2991 	"root %lld ino %llu folio %llu is marked dirty without notifying the fs",
2992 			     btrfs_root_id(BTRFS_I(inode)->root),
2993 			     btrfs_ino(BTRFS_I(inode)),
2994 			     folio_pos(folio));
2995 		return -EUCLEAN;
2996 	}
2997 
2998 	/*
2999 	 * folio_checked is set below when we create a fixup worker for this
3000 	 * folio, don't try to create another one if we're already
3001 	 * folio_test_checked.
3002 	 *
3003 	 * The extent_io writepage code will redirty the foio if we send back
3004 	 * EAGAIN.
3005 	 */
3006 	if (folio_test_checked(folio))
3007 		return -EAGAIN;
3008 
3009 	fixup = kzalloc_obj(*fixup, GFP_NOFS);
3010 	if (!fixup)
3011 		return -EAGAIN;
3012 
3013 	/*
3014 	 * We are already holding a reference to this inode from
3015 	 * write_cache_pages.  We need to hold it because the space reservation
3016 	 * takes place outside of the folio lock, and we can't trust
3017 	 * folio->mapping outside of the folio lock.
3018 	 */
3019 	ihold(inode);
3020 	btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
3021 	folio_get(folio);
3022 	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
3023 	fixup->folio = folio;
3024 	fixup->inode = BTRFS_I(inode);
3025 	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
3026 
3027 	return -EAGAIN;
3028 }
3029 
insert_reserved_file_extent(struct btrfs_trans_handle * trans,struct btrfs_inode * inode,u64 file_pos,struct btrfs_file_extent_item * stack_fi,const bool update_inode_bytes,u64 qgroup_reserved)3030 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
3031 				       struct btrfs_inode *inode, u64 file_pos,
3032 				       struct btrfs_file_extent_item *stack_fi,
3033 				       const bool update_inode_bytes,
3034 				       u64 qgroup_reserved)
3035 {
3036 	struct btrfs_root *root = inode->root;
3037 	const u64 sectorsize = root->fs_info->sectorsize;
3038 	BTRFS_PATH_AUTO_FREE(path);
3039 	struct extent_buffer *leaf;
3040 	struct btrfs_key ins;
3041 	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
3042 	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
3043 	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
3044 	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
3045 	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
3046 	struct btrfs_drop_extents_args drop_args = { 0 };
3047 	int ret;
3048 
3049 	path = btrfs_alloc_path();
3050 	if (!path)
3051 		return -ENOMEM;
3052 
3053 	/*
3054 	 * we may be replacing one extent in the tree with another.
3055 	 * The new extent is pinned in the extent map, and we don't want
3056 	 * to drop it from the cache until it is completely in the btree.
3057 	 *
3058 	 * So, tell btrfs_drop_extents to leave this extent in the cache.
3059 	 * the caller is expected to unpin it and allow it to be merged
3060 	 * with the others.
3061 	 */
3062 	drop_args.path = path;
3063 	drop_args.start = file_pos;
3064 	drop_args.end = file_pos + num_bytes;
3065 	drop_args.replace_extent = true;
3066 	drop_args.extent_item_size = sizeof(*stack_fi);
3067 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
3068 	if (ret)
3069 		return ret;
3070 
3071 	if (!drop_args.extent_inserted) {
3072 		ins.objectid = btrfs_ino(inode);
3073 		ins.type = BTRFS_EXTENT_DATA_KEY;
3074 		ins.offset = file_pos;
3075 
3076 		ret = btrfs_insert_empty_item(trans, root, path, &ins,
3077 					      sizeof(*stack_fi));
3078 		if (ret)
3079 			return ret;
3080 	}
3081 	leaf = path->nodes[0];
3082 	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
3083 	write_extent_buffer(leaf, stack_fi,
3084 			btrfs_item_ptr_offset(leaf, path->slots[0]),
3085 			sizeof(struct btrfs_file_extent_item));
3086 
3087 	btrfs_release_path(path);
3088 
3089 	/*
3090 	 * If we dropped an inline extent here, we know the range where it is
3091 	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
3092 	 * number of bytes only for that range containing the inline extent.
3093 	 * The remaining of the range will be processed when clearing the
3094 	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
3095 	 */
3096 	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
3097 		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
3098 
3099 		inline_size = drop_args.bytes_found - inline_size;
3100 		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
3101 		drop_args.bytes_found -= inline_size;
3102 		num_bytes -= sectorsize;
3103 	}
3104 
3105 	if (update_inode_bytes)
3106 		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
3107 
3108 	ins.objectid = disk_bytenr;
3109 	ins.type = BTRFS_EXTENT_ITEM_KEY;
3110 	ins.offset = disk_num_bytes;
3111 
3112 	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
3113 	if (ret)
3114 		return ret;
3115 
3116 	return btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
3117 						file_pos - offset,
3118 						qgroup_reserved, &ins);
3119 }
3120 
btrfs_release_delalloc_bytes(struct btrfs_fs_info * fs_info,u64 start,u64 len)3121 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3122 					 u64 start, u64 len)
3123 {
3124 	struct btrfs_block_group *cache;
3125 
3126 	cache = btrfs_lookup_block_group(fs_info, start);
3127 	ASSERT(cache);
3128 
3129 	spin_lock(&cache->lock);
3130 	cache->delalloc_bytes -= len;
3131 	spin_unlock(&cache->lock);
3132 
3133 	btrfs_put_block_group(cache);
3134 }
3135 
insert_ordered_extent_file_extent(struct btrfs_trans_handle * trans,struct btrfs_ordered_extent * oe)3136 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3137 					     struct btrfs_ordered_extent *oe)
3138 {
3139 	struct btrfs_file_extent_item stack_fi;
3140 	bool update_inode_bytes;
3141 	u64 num_bytes = oe->num_bytes;
3142 	u64 ram_bytes = oe->ram_bytes;
3143 
3144 	memset(&stack_fi, 0, sizeof(stack_fi));
3145 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3146 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3147 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3148 						   oe->disk_num_bytes);
3149 	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3150 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
3151 		num_bytes = oe->truncated_len;
3152 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3153 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3154 	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3155 	/* Encryption and other encoding is reserved and all 0 */
3156 
3157 	/*
3158 	 * For delalloc, when completing an ordered extent we update the inode's
3159 	 * bytes when clearing the range in the inode's io tree, so pass false
3160 	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3161 	 * except if the ordered extent was truncated.
3162 	 */
3163 	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3164 			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3165 			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3166 
3167 	return insert_reserved_file_extent(trans, oe->inode,
3168 					   oe->file_offset, &stack_fi,
3169 					   update_inode_bytes, oe->qgroup_rsv);
3170 }
3171 
3172 /*
3173  * As ordered data IO finishes, this gets called so we can finish
3174  * an ordered extent if the range of bytes in the file it covers are
3175  * fully written.
3176  */
btrfs_finish_one_ordered(struct btrfs_ordered_extent * ordered_extent)3177 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3178 {
3179 	struct btrfs_inode *inode = ordered_extent->inode;
3180 	struct btrfs_root *root = inode->root;
3181 	struct btrfs_fs_info *fs_info = root->fs_info;
3182 	struct btrfs_trans_handle *trans = NULL;
3183 	struct extent_io_tree *io_tree = &inode->io_tree;
3184 	struct extent_state *cached_state = NULL;
3185 	u64 start, end;
3186 	int compress_type = 0;
3187 	int ret = 0;
3188 	u64 logical_len = ordered_extent->num_bytes;
3189 	bool freespace_inode;
3190 	bool truncated = false;
3191 	bool clear_reserved_extent = true;
3192 	unsigned int clear_bits = EXTENT_DEFRAG;
3193 
3194 	start = ordered_extent->file_offset;
3195 	end = start + ordered_extent->num_bytes - 1;
3196 
3197 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3198 	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3199 	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3200 	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3201 		clear_bits |= EXTENT_DELALLOC_NEW;
3202 
3203 	freespace_inode = btrfs_is_free_space_inode(inode);
3204 	if (!freespace_inode)
3205 		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3206 
3207 	if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) {
3208 		ret = -EIO;
3209 		goto out;
3210 	}
3211 
3212 	ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3213 				      ordered_extent->disk_num_bytes);
3214 	if (ret)
3215 		goto out;
3216 
3217 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3218 		truncated = true;
3219 		logical_len = ordered_extent->truncated_len;
3220 		/* Truncated the entire extent, don't bother adding */
3221 		if (!logical_len)
3222 			goto out;
3223 	}
3224 
3225 	/*
3226 	 * If it's a COW write we need to lock the extent range as we will be
3227 	 * inserting/replacing file extent items and unpinning an extent map.
3228 	 * This must be taken before joining a transaction, as it's a higher
3229 	 * level lock (like the inode's VFS lock), otherwise we can run into an
3230 	 * ABBA deadlock with other tasks (transactions work like a lock,
3231 	 * depending on their current state).
3232 	 */
3233 	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3234 		clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED;
3235 		btrfs_lock_extent_bits(io_tree, start, end,
3236 				       EXTENT_LOCKED | EXTENT_FINISHING_ORDERED,
3237 				       &cached_state);
3238 	}
3239 
3240 	if (freespace_inode)
3241 		trans = btrfs_join_transaction_spacecache(root);
3242 	else
3243 		trans = btrfs_join_transaction(root);
3244 	if (IS_ERR(trans)) {
3245 		ret = PTR_ERR(trans);
3246 		trans = NULL;
3247 		goto out;
3248 	}
3249 
3250 	trans->block_rsv = &inode->block_rsv;
3251 
3252 	ret = btrfs_insert_raid_extent(trans, ordered_extent);
3253 	if (unlikely(ret)) {
3254 		btrfs_abort_transaction(trans, ret);
3255 		goto out;
3256 	}
3257 
3258 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3259 		/* Logic error */
3260 		ASSERT(list_empty(&ordered_extent->list));
3261 		if (unlikely(!list_empty(&ordered_extent->list))) {
3262 			ret = -EINVAL;
3263 			btrfs_abort_transaction(trans, ret);
3264 			goto out;
3265 		}
3266 
3267 		btrfs_inode_safe_disk_i_size_write(inode, 0);
3268 		ret = btrfs_update_inode_fallback(trans, inode);
3269 		if (unlikely(ret)) {
3270 			/* -ENOMEM or corruption */
3271 			btrfs_abort_transaction(trans, ret);
3272 		}
3273 		goto out;
3274 	}
3275 
3276 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3277 		compress_type = ordered_extent->compress_type;
3278 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3279 		BUG_ON(compress_type);
3280 		ret = btrfs_mark_extent_written(trans, inode,
3281 						ordered_extent->file_offset,
3282 						ordered_extent->file_offset +
3283 						logical_len);
3284 		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3285 						  ordered_extent->disk_num_bytes);
3286 		if (unlikely(ret < 0)) {
3287 			btrfs_abort_transaction(trans, ret);
3288 			goto out;
3289 		}
3290 	} else {
3291 		BUG_ON(root == fs_info->tree_root);
3292 		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3293 		if (unlikely(ret < 0)) {
3294 			btrfs_abort_transaction(trans, ret);
3295 			goto out;
3296 		}
3297 		clear_reserved_extent = false;
3298 		btrfs_release_delalloc_bytes(fs_info,
3299 					     ordered_extent->disk_bytenr,
3300 					     ordered_extent->disk_num_bytes);
3301 	}
3302 
3303 	ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset,
3304 				       ordered_extent->num_bytes, trans->transid);
3305 	if (unlikely(ret < 0)) {
3306 		btrfs_abort_transaction(trans, ret);
3307 		goto out;
3308 	}
3309 
3310 	ret = add_pending_csums(trans, &ordered_extent->list);
3311 	if (unlikely(ret)) {
3312 		btrfs_abort_transaction(trans, ret);
3313 		goto out;
3314 	}
3315 
3316 	/*
3317 	 * If this is a new delalloc range, clear its new delalloc flag to
3318 	 * update the inode's number of bytes. This needs to be done first
3319 	 * before updating the inode item.
3320 	 */
3321 	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3322 	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3323 		btrfs_clear_extent_bit(&inode->io_tree, start, end,
3324 				       EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3325 				       &cached_state);
3326 
3327 	btrfs_inode_safe_disk_i_size_write(inode, 0);
3328 	ret = btrfs_update_inode_fallback(trans, inode);
3329 	if (unlikely(ret)) { /* -ENOMEM or corruption */
3330 		btrfs_abort_transaction(trans, ret);
3331 		goto out;
3332 	}
3333 out:
3334 	btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3335 			       &cached_state);
3336 
3337 	if (trans)
3338 		btrfs_end_transaction(trans);
3339 
3340 	if (ret || truncated) {
3341 		/*
3342 		 * If we failed to finish this ordered extent for any reason we
3343 		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3344 		 * extent, and mark the inode with the error if it wasn't
3345 		 * already set.  Any error during writeback would have already
3346 		 * set the mapping error, so we need to set it if we're the ones
3347 		 * marking this ordered extent as failed.
3348 		 */
3349 		if (ret)
3350 			btrfs_mark_ordered_extent_error(ordered_extent);
3351 
3352 		/*
3353 		 * Drop extent maps for the part of the extent we didn't write.
3354 		 *
3355 		 * We have an exception here for the free_space_inode, this is
3356 		 * because when we do btrfs_get_extent() on the free space inode
3357 		 * we will search the commit root.  If this is a new block group
3358 		 * we won't find anything, and we will trip over the assert in
3359 		 * writepage where we do ASSERT(em->block_start !=
3360 		 * EXTENT_MAP_HOLE).
3361 		 *
3362 		 * Theoretically we could also skip this for any NOCOW extent as
3363 		 * we don't mess with the extent map tree in the NOCOW case, but
3364 		 * for now simply skip this if we are the free space inode.
3365 		 */
3366 		if (!btrfs_is_free_space_inode(inode)) {
3367 			u64 unwritten_start = start;
3368 
3369 			if (truncated)
3370 				unwritten_start += logical_len;
3371 
3372 			btrfs_drop_extent_map_range(inode, unwritten_start,
3373 						    end, false);
3374 		}
3375 
3376 		/*
3377 		 * If the ordered extent had an IOERR or something else went
3378 		 * wrong we need to return the space for this ordered extent
3379 		 * back to the allocator.  We only free the extent in the
3380 		 * truncated case if we didn't write out the extent at all.
3381 		 *
3382 		 * If we made it past insert_reserved_file_extent before we
3383 		 * errored out then we don't need to do this as the accounting
3384 		 * has already been done.
3385 		 */
3386 		if ((ret || !logical_len) &&
3387 		    clear_reserved_extent &&
3388 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3389 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3390 			/*
3391 			 * Discard the range before returning it back to the
3392 			 * free space pool
3393 			 */
3394 			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3395 				btrfs_discard_extent(fs_info,
3396 						ordered_extent->disk_bytenr,
3397 						ordered_extent->disk_num_bytes,
3398 						NULL, true);
3399 			btrfs_free_reserved_extent(fs_info,
3400 					ordered_extent->disk_bytenr,
3401 					ordered_extent->disk_num_bytes, true);
3402 			/*
3403 			 * Actually free the qgroup rsv which was released when
3404 			 * the ordered extent was created.
3405 			 */
3406 			btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
3407 						  ordered_extent->qgroup_rsv,
3408 						  BTRFS_QGROUP_RSV_DATA);
3409 		}
3410 	}
3411 
3412 	/*
3413 	 * This needs to be done to make sure anybody waiting knows we are done
3414 	 * updating everything for this ordered extent.
3415 	 */
3416 	btrfs_remove_ordered_extent(inode, ordered_extent);
3417 
3418 	/* once for us */
3419 	btrfs_put_ordered_extent(ordered_extent);
3420 	/* once for the tree */
3421 	btrfs_put_ordered_extent(ordered_extent);
3422 
3423 	return ret;
3424 }
3425 
btrfs_finish_ordered_io(struct btrfs_ordered_extent * ordered)3426 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3427 {
3428 	if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
3429 	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3430 	    list_empty(&ordered->bioc_list))
3431 		btrfs_finish_ordered_zoned(ordered);
3432 	return btrfs_finish_one_ordered(ordered);
3433 }
3434 
3435 /*
3436  * Calculate the checksum of an fs block at physical memory address @paddr,
3437  * and save the result to @dest.
3438  *
3439  * The folio containing @paddr must be large enough to contain a full fs block.
3440  */
btrfs_calculate_block_csum_folio(struct btrfs_fs_info * fs_info,const phys_addr_t paddr,u8 * dest)3441 void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
3442 				      const phys_addr_t paddr, u8 *dest)
3443 {
3444 	struct folio *folio = page_folio(phys_to_page(paddr));
3445 	const u32 blocksize = fs_info->sectorsize;
3446 	const u32 step = min(blocksize, PAGE_SIZE);
3447 	const u32 nr_steps = blocksize / step;
3448 	phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
3449 
3450 	/* The full block must be inside the folio. */
3451 	ASSERT(offset_in_folio(folio, paddr) + blocksize <= folio_size(folio));
3452 
3453 	for (int i = 0; i < nr_steps; i++) {
3454 		u32 pindex = offset_in_folio(folio, paddr + i * step) >> PAGE_SHIFT;
3455 
3456 		/*
3457 		 * For bs <= ps cases, we will only run the loop once, so the offset
3458 		 * inside the page will only added to paddrs[0].
3459 		 *
3460 		 * For bs > ps cases, the block must be page aligned, thus offset
3461 		 * inside the page will always be 0.
3462 		 */
3463 		paddrs[i] = page_to_phys(folio_page(folio, pindex)) + offset_in_page(paddr);
3464 	}
3465 	return btrfs_calculate_block_csum_pages(fs_info, paddrs, dest);
3466 }
3467 
3468 /*
3469  * Calculate the checksum of a fs block backed by multiple noncontiguous pages
3470  * at @paddrs[] and save the result to @dest.
3471  *
3472  * The folio containing @paddr must be large enough to contain a full fs block.
3473  */
btrfs_calculate_block_csum_pages(struct btrfs_fs_info * fs_info,const phys_addr_t paddrs[],u8 * dest)3474 void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
3475 				      const phys_addr_t paddrs[], u8 *dest)
3476 {
3477 	const u32 blocksize = fs_info->sectorsize;
3478 	const u32 step = min(blocksize, PAGE_SIZE);
3479 	const u32 nr_steps = blocksize / step;
3480 	struct btrfs_csum_ctx csum;
3481 
3482 	btrfs_csum_init(&csum, fs_info->csum_type);
3483 	for (int i = 0; i < nr_steps; i++) {
3484 		const phys_addr_t paddr = paddrs[i];
3485 		void *kaddr;
3486 
3487 		ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
3488 		kaddr = kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
3489 		btrfs_csum_update(&csum, kaddr, step);
3490 		kunmap_local(kaddr);
3491 	}
3492 	btrfs_csum_final(&csum, dest);
3493 }
3494 
3495 /*
3496  * Verify the checksum for a single sector without any extra action that depend
3497  * on the type of I/O.
3498  *
3499  * @kaddr must be a properly kmapped address.
3500  */
btrfs_check_block_csum(struct btrfs_fs_info * fs_info,phys_addr_t paddr,u8 * csum,const u8 * const csum_expected)3501 int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
3502 			   const u8 * const csum_expected)
3503 {
3504 	btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
3505 	if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
3506 		return -EIO;
3507 	return 0;
3508 }
3509 
3510 /*
3511  * Verify the checksum of a single data sector, which can be scattered at
3512  * different noncontiguous pages.
3513  *
3514  * @bbio:	btrfs_io_bio which contains the csum
3515  * @dev:	device the sector is on
3516  * @bio_offset:	offset to the beginning of the bio (in bytes)
3517  * @paddrs:	physical addresses which back the fs block
3518  *
3519  * Check if the checksum on a data block is valid.  When a checksum mismatch is
3520  * detected, report the error and fill the corrupted range with zero.
3521  *
3522  * Return %true if the sector is ok or had no checksum to start with, else %false.
3523  */
btrfs_data_csum_ok(struct btrfs_bio * bbio,struct btrfs_device * dev,u32 bio_offset,const phys_addr_t paddrs[])3524 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
3525 			u32 bio_offset, const phys_addr_t paddrs[])
3526 {
3527 	struct btrfs_inode *inode = bbio->inode;
3528 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3529 	const u32 blocksize = fs_info->sectorsize;
3530 	const u32 step = min(blocksize, PAGE_SIZE);
3531 	const u32 nr_steps = blocksize / step;
3532 	u64 file_offset = bbio->file_offset + bio_offset;
3533 	u64 end = file_offset + blocksize - 1;
3534 	u8 *csum_expected;
3535 	u8 csum[BTRFS_CSUM_SIZE];
3536 
3537 	if (!bbio->csum)
3538 		return true;
3539 
3540 	if (btrfs_is_data_reloc_root(inode->root) &&
3541 	    btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3542 				 NULL)) {
3543 		/* Skip the range without csum for data reloc inode */
3544 		btrfs_clear_extent_bit(&inode->io_tree, file_offset, end,
3545 				       EXTENT_NODATASUM, NULL);
3546 		return true;
3547 	}
3548 
3549 	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
3550 				fs_info->csum_size;
3551 	btrfs_calculate_block_csum_pages(fs_info, paddrs, csum);
3552 	if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
3553 		goto zeroit;
3554 	return true;
3555 
3556 zeroit:
3557 	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
3558 				    bbio->mirror_num);
3559 	if (dev)
3560 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
3561 	for (int i = 0; i < nr_steps; i++)
3562 		memzero_page(phys_to_page(paddrs[i]), offset_in_page(paddrs[i]), step);
3563 	return false;
3564 }
3565 
3566 /*
3567  * Perform a delayed iput on @inode.
3568  *
3569  * @inode: The inode we want to perform iput on
3570  *
3571  * This function uses the generic vfs_inode::i_count to track whether we should
3572  * just decrement it (in case it's > 1) or if this is the last iput then link
3573  * the inode to the delayed iput machinery. Delayed iputs are processed at
3574  * transaction commit time/superblock commit/cleaner kthread.
3575  */
btrfs_add_delayed_iput(struct btrfs_inode * inode)3576 void btrfs_add_delayed_iput(struct btrfs_inode *inode)
3577 {
3578 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3579 	unsigned long flags;
3580 
3581 	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
3582 		return;
3583 
3584 	WARN_ON_ONCE(test_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state));
3585 	atomic_inc(&fs_info->nr_delayed_iputs);
3586 	/*
3587 	 * Need to be irq safe here because we can be called from either an irq
3588 	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
3589 	 * context.
3590 	 */
3591 	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
3592 	ASSERT(list_empty(&inode->delayed_iput));
3593 	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
3594 	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
3595 	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3596 		wake_up_process(fs_info->cleaner_kthread);
3597 }
3598 
run_delayed_iput_locked(struct btrfs_fs_info * fs_info,struct btrfs_inode * inode)3599 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3600 				    struct btrfs_inode *inode)
3601 {
3602 	list_del_init(&inode->delayed_iput);
3603 	spin_unlock_irq(&fs_info->delayed_iput_lock);
3604 	iput(&inode->vfs_inode);
3605 	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3606 		wake_up(&fs_info->delayed_iputs_wait);
3607 	spin_lock_irq(&fs_info->delayed_iput_lock);
3608 }
3609 
btrfs_run_delayed_iput(struct btrfs_fs_info * fs_info,struct btrfs_inode * inode)3610 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3611 				   struct btrfs_inode *inode)
3612 {
3613 	if (!list_empty(&inode->delayed_iput)) {
3614 		spin_lock_irq(&fs_info->delayed_iput_lock);
3615 		if (!list_empty(&inode->delayed_iput))
3616 			run_delayed_iput_locked(fs_info, inode);
3617 		spin_unlock_irq(&fs_info->delayed_iput_lock);
3618 	}
3619 }
3620 
btrfs_run_delayed_iputs(struct btrfs_fs_info * fs_info)3621 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3622 {
3623 	/*
3624 	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
3625 	 * calls btrfs_add_delayed_iput() and that needs to lock
3626 	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
3627 	 * prevent a deadlock.
3628 	 */
3629 	spin_lock_irq(&fs_info->delayed_iput_lock);
3630 	while (!list_empty(&fs_info->delayed_iputs)) {
3631 		struct btrfs_inode *inode;
3632 
3633 		inode = list_first_entry(&fs_info->delayed_iputs,
3634 				struct btrfs_inode, delayed_iput);
3635 		run_delayed_iput_locked(fs_info, inode);
3636 		if (need_resched()) {
3637 			spin_unlock_irq(&fs_info->delayed_iput_lock);
3638 			cond_resched();
3639 			spin_lock_irq(&fs_info->delayed_iput_lock);
3640 		}
3641 	}
3642 	spin_unlock_irq(&fs_info->delayed_iput_lock);
3643 }
3644 
3645 /*
3646  * Wait for flushing all delayed iputs
3647  *
3648  * @fs_info:  the filesystem
3649  *
3650  * This will wait on any delayed iputs that are currently running with KILLABLE
3651  * set.  Once they are all done running we will return, unless we are killed in
3652  * which case we return EINTR. This helps in user operations like fallocate etc
3653  * that might get blocked on the iputs.
3654  *
3655  * Return EINTR if we were killed, 0 if nothing's pending
3656  */
btrfs_wait_on_delayed_iputs(struct btrfs_fs_info * fs_info)3657 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3658 {
3659 	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3660 			atomic_read(&fs_info->nr_delayed_iputs) == 0);
3661 	if (ret)
3662 		return -EINTR;
3663 	return 0;
3664 }
3665 
3666 /*
3667  * This creates an orphan entry for the given inode in case something goes wrong
3668  * in the middle of an unlink.
3669  */
btrfs_orphan_add(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)3670 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3671 		     struct btrfs_inode *inode)
3672 {
3673 	int ret;
3674 
3675 	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3676 	if (unlikely(ret && ret != -EEXIST)) {
3677 		btrfs_abort_transaction(trans, ret);
3678 		return ret;
3679 	}
3680 
3681 	return 0;
3682 }
3683 
3684 /*
3685  * We have done the delete so we can go ahead and remove the orphan item for
3686  * this particular inode.
3687  */
btrfs_orphan_del(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)3688 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3689 			    struct btrfs_inode *inode)
3690 {
3691 	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3692 }
3693 
3694 /*
3695  * this cleans up any orphans that may be left on the list from the last use
3696  * of this root.
3697  */
btrfs_orphan_cleanup(struct btrfs_root * root)3698 int btrfs_orphan_cleanup(struct btrfs_root *root)
3699 {
3700 	struct btrfs_fs_info *fs_info = root->fs_info;
3701 	BTRFS_PATH_AUTO_FREE(path);
3702 	struct extent_buffer *leaf;
3703 	struct btrfs_key key, found_key;
3704 	struct btrfs_trans_handle *trans;
3705 	u64 last_objectid = 0;
3706 	int ret = 0, nr_unlink = 0;
3707 
3708 	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3709 		return 0;
3710 
3711 	path = btrfs_alloc_path();
3712 	if (!path) {
3713 		ret = -ENOMEM;
3714 		goto out;
3715 	}
3716 	path->reada = READA_BACK;
3717 
3718 	key.objectid = BTRFS_ORPHAN_OBJECTID;
3719 	key.type = BTRFS_ORPHAN_ITEM_KEY;
3720 	key.offset = (u64)-1;
3721 
3722 	while (1) {
3723 		struct btrfs_inode *inode;
3724 
3725 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3726 		if (ret < 0)
3727 			goto out;
3728 
3729 		/*
3730 		 * if ret == 0 means we found what we were searching for, which
3731 		 * is weird, but possible, so only screw with path if we didn't
3732 		 * find the key and see if we have stuff that matches
3733 		 */
3734 		if (ret > 0) {
3735 			ret = 0;
3736 			if (path->slots[0] == 0)
3737 				break;
3738 			path->slots[0]--;
3739 		}
3740 
3741 		/* pull out the item */
3742 		leaf = path->nodes[0];
3743 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3744 
3745 		/* make sure the item matches what we want */
3746 		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3747 			break;
3748 		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3749 			break;
3750 
3751 		/* release the path since we're done with it */
3752 		btrfs_release_path(path);
3753 
3754 		/*
3755 		 * this is where we are basically btrfs_lookup, without the
3756 		 * crossing root thing.  we store the inode number in the
3757 		 * offset of the orphan item.
3758 		 */
3759 
3760 		if (found_key.offset == last_objectid) {
3761 			/*
3762 			 * We found the same inode as before. This means we were
3763 			 * not able to remove its items via eviction triggered
3764 			 * by an iput(). A transaction abort may have happened,
3765 			 * due to -ENOSPC for example, so try to grab the error
3766 			 * that lead to a transaction abort, if any.
3767 			 */
3768 			btrfs_err(fs_info,
3769 				  "Error removing orphan entry, stopping orphan cleanup");
3770 			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
3771 			goto out;
3772 		}
3773 
3774 		last_objectid = found_key.offset;
3775 
3776 		found_key.objectid = found_key.offset;
3777 		found_key.type = BTRFS_INODE_ITEM_KEY;
3778 		found_key.offset = 0;
3779 		inode = btrfs_iget(last_objectid, root);
3780 		if (IS_ERR(inode)) {
3781 			ret = PTR_ERR(inode);
3782 			inode = NULL;
3783 			if (ret != -ENOENT)
3784 				goto out;
3785 		}
3786 
3787 		if (!inode && root == fs_info->tree_root) {
3788 			struct btrfs_root *dead_root;
3789 			int is_dead_root = 0;
3790 
3791 			/*
3792 			 * This is an orphan in the tree root. Currently these
3793 			 * could come from 2 sources:
3794 			 *  a) a root (snapshot/subvolume) deletion in progress
3795 			 *  b) a free space cache inode
3796 			 * We need to distinguish those two, as the orphan item
3797 			 * for a root must not get deleted before the deletion
3798 			 * of the snapshot/subvolume's tree completes.
3799 			 *
3800 			 * btrfs_find_orphan_roots() ran before us, which has
3801 			 * found all deleted roots and loaded them into
3802 			 * fs_info->fs_roots_radix. So here we can find if an
3803 			 * orphan item corresponds to a deleted root by looking
3804 			 * up the root from that radix tree.
3805 			 */
3806 
3807 			spin_lock(&fs_info->fs_roots_radix_lock);
3808 			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3809 							 (unsigned long)found_key.objectid);
3810 			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3811 				is_dead_root = 1;
3812 			spin_unlock(&fs_info->fs_roots_radix_lock);
3813 
3814 			if (is_dead_root) {
3815 				/* prevent this orphan from being found again */
3816 				key.offset = found_key.objectid - 1;
3817 				continue;
3818 			}
3819 
3820 		}
3821 
3822 		/*
3823 		 * If we have an inode with links, there are a couple of
3824 		 * possibilities:
3825 		 *
3826 		 * 1. We were halfway through creating fsverity metadata for the
3827 		 * file. In that case, the orphan item represents incomplete
3828 		 * fsverity metadata which must be cleaned up with
3829 		 * btrfs_drop_verity_items and deleting the orphan item.
3830 
3831 		 * 2. Old kernels (before v3.12) used to create an
3832 		 * orphan item for truncate indicating that there were possibly
3833 		 * extent items past i_size that needed to be deleted. In v3.12,
3834 		 * truncate was changed to update i_size in sync with the extent
3835 		 * items, but the (useless) orphan item was still created. Since
3836 		 * v4.18, we don't create the orphan item for truncate at all.
3837 		 *
3838 		 * So, this item could mean that we need to do a truncate, but
3839 		 * only if this filesystem was last used on a pre-v3.12 kernel
3840 		 * and was not cleanly unmounted. The odds of that are quite
3841 		 * slim, and it's a pain to do the truncate now, so just delete
3842 		 * the orphan item.
3843 		 *
3844 		 * It's also possible that this orphan item was supposed to be
3845 		 * deleted but wasn't. The inode number may have been reused,
3846 		 * but either way, we can delete the orphan item.
3847 		 */
3848 		if (!inode || inode->vfs_inode.i_nlink) {
3849 			if (inode) {
3850 				ret = btrfs_drop_verity_items(inode);
3851 				iput(&inode->vfs_inode);
3852 				inode = NULL;
3853 				if (ret)
3854 					goto out;
3855 			}
3856 			trans = btrfs_start_transaction(root, 1);
3857 			if (IS_ERR(trans)) {
3858 				ret = PTR_ERR(trans);
3859 				goto out;
3860 			}
3861 			btrfs_debug(fs_info, "auto deleting %Lu",
3862 				    found_key.objectid);
3863 			ret = btrfs_del_orphan_item(trans, root,
3864 						    found_key.objectid);
3865 			btrfs_end_transaction(trans);
3866 			if (ret)
3867 				goto out;
3868 			continue;
3869 		}
3870 
3871 		nr_unlink++;
3872 
3873 		/* this will do delete_inode and everything for us */
3874 		iput(&inode->vfs_inode);
3875 	}
3876 	/* release the path since we're done with it */
3877 	btrfs_release_path(path);
3878 
3879 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3880 		trans = btrfs_join_transaction(root);
3881 		if (!IS_ERR(trans))
3882 			btrfs_end_transaction(trans);
3883 	}
3884 
3885 	if (nr_unlink)
3886 		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3887 
3888 out:
3889 	if (ret)
3890 		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3891 	return ret;
3892 }
3893 
3894 /*
3895  * Look ahead in the leaf for xattrs. If we don't find any then we know there
3896  * can't be any ACLs.
3897  *
3898  * @leaf:       the eb leaf where to search
3899  * @slot:       the slot the inode is in
3900  * @objectid:   the objectid of the inode
3901  *
3902  * Return true if there is xattr/ACL, false otherwise.
3903  */
acls_after_inode_item(struct extent_buffer * leaf,int slot,u64 objectid,int * first_xattr_slot)3904 static noinline bool acls_after_inode_item(struct extent_buffer *leaf,
3905 					   int slot, u64 objectid,
3906 					   int *first_xattr_slot)
3907 {
3908 	u32 nritems = btrfs_header_nritems(leaf);
3909 	struct btrfs_key found_key;
3910 	static u64 xattr_access = 0;
3911 	static u64 xattr_default = 0;
3912 	int scanned = 0;
3913 
3914 	if (!xattr_access) {
3915 		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3916 					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3917 		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3918 					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3919 	}
3920 
3921 	slot++;
3922 	*first_xattr_slot = -1;
3923 	while (slot < nritems) {
3924 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
3925 
3926 		/* We found a different objectid, there must be no ACLs. */
3927 		if (found_key.objectid != objectid)
3928 			return false;
3929 
3930 		/* We found an xattr, assume we've got an ACL. */
3931 		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3932 			if (*first_xattr_slot == -1)
3933 				*first_xattr_slot = slot;
3934 			if (found_key.offset == xattr_access ||
3935 			    found_key.offset == xattr_default)
3936 				return true;
3937 		}
3938 
3939 		/*
3940 		 * We found a key greater than an xattr key, there can't be any
3941 		 * ACLs later on.
3942 		 */
3943 		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3944 			return false;
3945 
3946 		slot++;
3947 		scanned++;
3948 
3949 		/*
3950 		 * The item order goes like:
3951 		 * - inode
3952 		 * - inode backrefs
3953 		 * - xattrs
3954 		 * - extents,
3955 		 *
3956 		 * so if there are lots of hard links to an inode there can be
3957 		 * a lot of backrefs.  Don't waste time searching too hard,
3958 		 * this is just an optimization.
3959 		 */
3960 		if (scanned >= 8)
3961 			break;
3962 	}
3963 	/*
3964 	 * We hit the end of the leaf before we found an xattr or something
3965 	 * larger than an xattr.  We have to assume the inode has ACLs.
3966 	 */
3967 	if (*first_xattr_slot == -1)
3968 		*first_xattr_slot = slot;
3969 	return true;
3970 }
3971 
btrfs_init_file_extent_tree(struct btrfs_inode * inode)3972 static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
3973 {
3974 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3975 
3976 	if (WARN_ON_ONCE(inode->file_extent_tree))
3977 		return 0;
3978 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
3979 		return 0;
3980 	if (!S_ISREG(inode->vfs_inode.i_mode))
3981 		return 0;
3982 	if (btrfs_is_free_space_inode(inode))
3983 		return 0;
3984 
3985 	inode->file_extent_tree = kmalloc_obj(struct extent_io_tree);
3986 	if (!inode->file_extent_tree)
3987 		return -ENOMEM;
3988 
3989 	btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree,
3990 				  IO_TREE_INODE_FILE_EXTENT);
3991 	/* Lockdep class is set only for the file extent tree. */
3992 	lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
3993 
3994 	return 0;
3995 }
3996 
btrfs_add_inode_to_root(struct btrfs_inode * inode,bool prealloc)3997 static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
3998 {
3999 	struct btrfs_root *root = inode->root;
4000 	struct btrfs_inode *existing;
4001 	const u64 ino = btrfs_ino(inode);
4002 	int ret;
4003 
4004 	if (inode_unhashed(&inode->vfs_inode))
4005 		return 0;
4006 
4007 	if (prealloc) {
4008 		ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
4009 		if (ret)
4010 			return ret;
4011 	}
4012 
4013 	existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
4014 
4015 	if (xa_is_err(existing)) {
4016 		ret = xa_err(existing);
4017 		ASSERT(ret != -EINVAL);
4018 		ASSERT(ret != -ENOMEM);
4019 		return ret;
4020 	} else if (existing) {
4021 		WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING)));
4022 	}
4023 
4024 	return 0;
4025 }
4026 
4027 /*
4028  * Read a locked inode from the btree into the in-memory inode and add it to
4029  * its root list/tree.
4030  *
4031  * On failure clean up the inode.
4032  */
btrfs_read_locked_inode(struct btrfs_inode * inode,struct btrfs_path * path)4033 static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path *path)
4034 {
4035 	struct btrfs_root *root = inode->root;
4036 	struct btrfs_fs_info *fs_info = root->fs_info;
4037 	struct extent_buffer *leaf;
4038 	struct btrfs_inode_item *inode_item;
4039 	struct inode *vfs_inode = &inode->vfs_inode;
4040 	struct btrfs_key location;
4041 	unsigned long ptr;
4042 	int maybe_acls;
4043 	u32 rdev;
4044 	int ret;
4045 	bool filled = false;
4046 	int first_xattr_slot;
4047 
4048 	ret = btrfs_fill_inode(inode, &rdev);
4049 	if (!ret)
4050 		filled = true;
4051 
4052 	ASSERT(path);
4053 
4054 	btrfs_get_inode_key(inode, &location);
4055 
4056 	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
4057 	if (ret) {
4058 		/*
4059 		 * ret > 0 can come from btrfs_search_slot called by
4060 		 * btrfs_lookup_inode(), this means the inode was not found.
4061 		 */
4062 		if (ret > 0)
4063 			ret = -ENOENT;
4064 		goto out;
4065 	}
4066 
4067 	leaf = path->nodes[0];
4068 
4069 	if (filled)
4070 		goto cache_index;
4071 
4072 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4073 				    struct btrfs_inode_item);
4074 	vfs_inode->i_mode = btrfs_inode_mode(leaf, inode_item);
4075 	set_nlink(vfs_inode, btrfs_inode_nlink(leaf, inode_item));
4076 	i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item));
4077 	i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item));
4078 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
4079 
4080 	inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime),
4081 			btrfs_timespec_nsec(leaf, &inode_item->atime));
4082 
4083 	inode_set_mtime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
4084 			btrfs_timespec_nsec(leaf, &inode_item->mtime));
4085 
4086 	inode_set_ctime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
4087 			btrfs_timespec_nsec(leaf, &inode_item->ctime));
4088 
4089 	inode->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
4090 	inode->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
4091 
4092 	inode_set_bytes(vfs_inode, btrfs_inode_nbytes(leaf, inode_item));
4093 	inode->generation = btrfs_inode_generation(leaf, inode_item);
4094 	inode->last_trans = btrfs_inode_transid(leaf, inode_item);
4095 
4096 	inode_set_iversion_queried(vfs_inode, btrfs_inode_sequence(leaf, inode_item));
4097 	vfs_inode->i_generation = inode->generation;
4098 	vfs_inode->i_rdev = 0;
4099 	rdev = btrfs_inode_rdev(leaf, inode_item);
4100 
4101 	if (S_ISDIR(vfs_inode->i_mode))
4102 		inode->index_cnt = (u64)-1;
4103 
4104 	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
4105 				&inode->flags, &inode->ro_flags);
4106 	btrfs_update_inode_mapping_flags(inode);
4107 	btrfs_set_inode_mapping_order(inode);
4108 
4109 cache_index:
4110 	/*
4111 	 * If we were modified in the current generation and evicted from memory
4112 	 * and then re-read we need to do a full sync since we don't have any
4113 	 * idea about which extents were modified before we were evicted from
4114 	 * cache.
4115 	 *
4116 	 * This is required for both inode re-read from disk and delayed inode
4117 	 * in the delayed_nodes xarray.
4118 	 */
4119 	if (inode->last_trans == btrfs_get_fs_generation(fs_info))
4120 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
4121 
4122 	/*
4123 	 * We don't persist the id of the transaction where an unlink operation
4124 	 * against the inode was last made. So here we assume the inode might
4125 	 * have been evicted, and therefore the exact value of last_unlink_trans
4126 	 * lost, and set it to last_trans to avoid metadata inconsistencies
4127 	 * between the inode and its parent if the inode is fsync'ed and the log
4128 	 * replayed. For example, in the scenario:
4129 	 *
4130 	 * touch mydir/foo
4131 	 * ln mydir/foo mydir/bar
4132 	 * sync
4133 	 * unlink mydir/bar
4134 	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
4135 	 * xfs_io -c fsync mydir/foo
4136 	 * <power failure>
4137 	 * mount fs, triggers fsync log replay
4138 	 *
4139 	 * We must make sure that when we fsync our inode foo we also log its
4140 	 * parent inode, otherwise after log replay the parent still has the
4141 	 * dentry with the "bar" name but our inode foo has a link count of 1
4142 	 * and doesn't have an inode ref with the name "bar" anymore.
4143 	 *
4144 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
4145 	 * but it guarantees correctness at the expense of occasional full
4146 	 * transaction commits on fsync if our inode is a directory, or if our
4147 	 * inode is not a directory, logging its parent unnecessarily.
4148 	 */
4149 	inode->last_unlink_trans = inode->last_trans;
4150 
4151 	/*
4152 	 * Same logic as for last_unlink_trans. We don't persist the generation
4153 	 * of the last transaction where this inode was used for a reflink
4154 	 * operation, so after eviction and reloading the inode we must be
4155 	 * pessimistic and assume the last transaction that modified the inode.
4156 	 */
4157 	inode->last_reflink_trans = inode->last_trans;
4158 
4159 	path->slots[0]++;
4160 	if (vfs_inode->i_nlink != 1 ||
4161 	    path->slots[0] >= btrfs_header_nritems(leaf))
4162 		goto cache_acl;
4163 
4164 	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
4165 	if (location.objectid != btrfs_ino(inode))
4166 		goto cache_acl;
4167 
4168 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4169 	if (location.type == BTRFS_INODE_REF_KEY) {
4170 		struct btrfs_inode_ref *ref;
4171 
4172 		ref = (struct btrfs_inode_ref *)ptr;
4173 		inode->dir_index = btrfs_inode_ref_index(leaf, ref);
4174 	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
4175 		struct btrfs_inode_extref *extref;
4176 
4177 		extref = (struct btrfs_inode_extref *)ptr;
4178 		inode->dir_index = btrfs_inode_extref_index(leaf, extref);
4179 	}
4180 cache_acl:
4181 	/*
4182 	 * try to precache a NULL acl entry for files that don't have
4183 	 * any xattrs or acls
4184 	 */
4185 	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
4186 					   btrfs_ino(inode), &first_xattr_slot);
4187 	if (first_xattr_slot != -1) {
4188 		path->slots[0] = first_xattr_slot;
4189 		ret = btrfs_load_inode_props(inode, path);
4190 		if (ret)
4191 			btrfs_err(fs_info,
4192 				  "error loading props for ino %llu (root %llu): %d",
4193 				  btrfs_ino(inode), btrfs_root_id(root), ret);
4194 	}
4195 
4196 	/*
4197 	 * We don't need the path anymore, so release it to avoid holding a read
4198 	 * lock on a leaf while calling btrfs_init_file_extent_tree(), which can
4199 	 * allocate memory that triggers reclaim (GFP_KERNEL) and cause a locking
4200 	 * dependency.
4201 	 */
4202 	btrfs_release_path(path);
4203 
4204 	ret = btrfs_init_file_extent_tree(inode);
4205 	if (ret)
4206 		goto out;
4207 	btrfs_inode_set_file_extent_range(inode, 0,
4208 			  round_up(i_size_read(vfs_inode), fs_info->sectorsize));
4209 
4210 	if (!maybe_acls)
4211 		cache_no_acl(vfs_inode);
4212 
4213 	switch (vfs_inode->i_mode & S_IFMT) {
4214 	case S_IFREG:
4215 		vfs_inode->i_mapping->a_ops = &btrfs_aops;
4216 		vfs_inode->i_fop = &btrfs_file_operations;
4217 		vfs_inode->i_op = &btrfs_file_inode_operations;
4218 		break;
4219 	case S_IFDIR:
4220 		vfs_inode->i_fop = &btrfs_dir_file_operations;
4221 		vfs_inode->i_op = &btrfs_dir_inode_operations;
4222 		break;
4223 	case S_IFLNK:
4224 		vfs_inode->i_op = &btrfs_symlink_inode_operations;
4225 		inode_nohighmem(vfs_inode);
4226 		vfs_inode->i_mapping->a_ops = &btrfs_aops;
4227 		break;
4228 	default:
4229 		vfs_inode->i_op = &btrfs_special_inode_operations;
4230 		init_special_inode(vfs_inode, vfs_inode->i_mode, rdev);
4231 		break;
4232 	}
4233 
4234 	btrfs_sync_inode_flags_to_i_flags(inode);
4235 
4236 	ret = btrfs_add_inode_to_root(inode, true);
4237 	if (ret)
4238 		goto out;
4239 
4240 	return 0;
4241 out:
4242 	/*
4243 	 * We may have a read locked leaf and iget_failed() triggers inode
4244 	 * eviction which needs to release the delayed inode and that needs
4245 	 * to lock the delayed inode's mutex. This can cause a ABBA deadlock
4246 	 * with a task running delayed items, as that require first locking
4247 	 * the delayed inode's mutex and then modifying its subvolume btree.
4248 	 * So release the path before iget_failed().
4249 	 */
4250 	btrfs_release_path(path);
4251 	iget_failed(vfs_inode);
4252 	return ret;
4253 }
4254 
4255 /*
4256  * given a leaf and an inode, copy the inode fields into the leaf
4257  */
fill_inode_item(struct btrfs_trans_handle * trans,struct extent_buffer * leaf,struct btrfs_inode_item * item,struct inode * inode)4258 static void fill_inode_item(struct btrfs_trans_handle *trans,
4259 			    struct extent_buffer *leaf,
4260 			    struct btrfs_inode_item *item,
4261 			    struct inode *inode)
4262 {
4263 	u64 flags;
4264 
4265 	btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
4266 	btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
4267 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
4268 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
4269 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
4270 
4271 	btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
4272 	btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));
4273 
4274 	btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
4275 	btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));
4276 
4277 	btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
4278 	btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));
4279 
4280 	btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
4281 	btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
4282 
4283 	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
4284 	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
4285 	btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
4286 	btrfs_set_inode_transid(leaf, item, trans->transid);
4287 	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
4288 	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4289 					  BTRFS_I(inode)->ro_flags);
4290 	btrfs_set_inode_flags(leaf, item, flags);
4291 	btrfs_set_inode_block_group(leaf, item, 0);
4292 }
4293 
4294 /*
4295  * copy everything in the in-memory inode into the btree.
4296  */
btrfs_update_inode_item(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)4297 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4298 					    struct btrfs_inode *inode)
4299 {
4300 	struct btrfs_inode_item *inode_item;
4301 	BTRFS_PATH_AUTO_FREE(path);
4302 	struct extent_buffer *leaf;
4303 	struct btrfs_key key;
4304 	int ret;
4305 
4306 	path = btrfs_alloc_path();
4307 	if (!path)
4308 		return -ENOMEM;
4309 
4310 	btrfs_get_inode_key(inode, &key);
4311 	ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
4312 	if (ret) {
4313 		if (ret > 0)
4314 			ret = -ENOENT;
4315 		return ret;
4316 	}
4317 
4318 	leaf = path->nodes[0];
4319 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
4320 				    struct btrfs_inode_item);
4321 
4322 	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
4323 	btrfs_set_inode_last_trans(trans, inode);
4324 	return 0;
4325 }
4326 
4327 /*
4328  * copy everything in the in-memory inode into the btree.
4329  */
btrfs_update_inode(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)4330 int btrfs_update_inode(struct btrfs_trans_handle *trans,
4331 		       struct btrfs_inode *inode)
4332 {
4333 	struct btrfs_root *root = inode->root;
4334 	struct btrfs_fs_info *fs_info = root->fs_info;
4335 	int ret;
4336 
4337 	/*
4338 	 * If the inode is a free space inode, we can deadlock during commit
4339 	 * if we put it into the delayed code.
4340 	 *
4341 	 * The data relocation inode should also be directly updated
4342 	 * without delay
4343 	 */
4344 	if (!btrfs_is_free_space_inode(inode)
4345 	    && !btrfs_is_data_reloc_root(root)
4346 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4347 		btrfs_update_root_times(trans, root);
4348 
4349 		ret = btrfs_delayed_update_inode(trans, inode);
4350 		if (!ret)
4351 			btrfs_set_inode_last_trans(trans, inode);
4352 		return ret;
4353 	}
4354 
4355 	return btrfs_update_inode_item(trans, inode);
4356 }
4357 
btrfs_update_inode_fallback(struct btrfs_trans_handle * trans,struct btrfs_inode * inode)4358 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4359 				struct btrfs_inode *inode)
4360 {
4361 	int ret;
4362 
4363 	ret = btrfs_update_inode(trans, inode);
4364 	if (ret == -ENOSPC)
4365 		return btrfs_update_inode_item(trans, inode);
4366 	return ret;
4367 }
4368 
update_time_after_link_or_unlink(struct btrfs_inode * dir)4369 static void update_time_after_link_or_unlink(struct btrfs_inode *dir)
4370 {
4371 	struct timespec64 now;
4372 
4373 	/*
4374 	 * If we are replaying a log tree, we do not want to update the mtime
4375 	 * and ctime of the parent directory with the current time, since the
4376 	 * log replay procedure is responsible for setting them to their correct
4377 	 * values (the ones it had when the fsync was done).
4378 	 */
4379 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags))
4380 		return;
4381 
4382 	now = inode_set_ctime_current(&dir->vfs_inode);
4383 	inode_set_mtime_to_ts(&dir->vfs_inode, now);
4384 }
4385 
4386 /*
4387  * unlink helper that gets used here in inode.c and in the tree logging
4388  * recovery code.  It remove a link in a directory with a given name, and
4389  * also drops the back refs in the inode to the directory
4390  */
__btrfs_unlink_inode(struct btrfs_trans_handle * trans,struct btrfs_inode * dir,struct btrfs_inode * inode,const struct fscrypt_str * name,struct btrfs_rename_ctx * rename_ctx)4391 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4392 				struct btrfs_inode *dir,
4393 				struct btrfs_inode *inode,
4394 				const struct fscrypt_str *name,
4395 				struct btrfs_rename_ctx *rename_ctx)
4396 {
4397 	struct btrfs_root *root = dir->root;
4398 	struct btrfs_fs_info *fs_info = root->fs_info;
4399 	struct btrfs_path *path;
4400 	int ret = 0;
4401 	struct btrfs_dir_item *di;
4402 	u64 index;
4403 	u64 ino = btrfs_ino(inode);
4404 	u64 dir_ino = btrfs_ino(dir);
4405 
4406 	path = btrfs_alloc_path();
4407 	if (!path)
4408 		return -ENOMEM;
4409 
4410 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
4411 	if (IS_ERR_OR_NULL(di)) {
4412 		btrfs_free_path(path);
4413 		return di ? PTR_ERR(di) : -ENOENT;
4414 	}
4415 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4416 	/*
4417 	 * Down the call chains below we'll also need to allocate a path, so no
4418 	 * need to hold on to this one for longer than necessary.
4419 	 */
4420 	btrfs_free_path(path);
4421 	if (ret)
4422 		return ret;
4423 
4424 	/*
4425 	 * If we don't have dir index, we have to get it by looking up
4426 	 * the inode ref, since we get the inode ref, remove it directly,
4427 	 * it is unnecessary to do delayed deletion.
4428 	 *
4429 	 * But if we have dir index, needn't search inode ref to get it.
4430 	 * Since the inode ref is close to the inode item, it is better
4431 	 * that we delay to delete it, and just do this deletion when
4432 	 * we update the inode item.
4433 	 */
4434 	if (inode->dir_index) {
4435 		ret = btrfs_delayed_delete_inode_ref(inode);
4436 		if (!ret) {
4437 			index = inode->dir_index;
4438 			goto skip_backref;
4439 		}
4440 	}
4441 
4442 	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
4443 	if (unlikely(ret)) {
4444 		btrfs_crit(fs_info,
4445 	   "failed to delete reference to %.*s, root %llu inode %llu parent %llu",
4446 			   name->len, name->name, btrfs_root_id(root), ino, dir_ino);
4447 		btrfs_abort_transaction(trans, ret);
4448 		return ret;
4449 	}
4450 skip_backref:
4451 	if (rename_ctx)
4452 		rename_ctx->index = index;
4453 
4454 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4455 	if (unlikely(ret)) {
4456 		btrfs_abort_transaction(trans, ret);
4457 		return ret;
4458 	}
4459 
4460 	/*
4461 	 * If we are in a rename context, we don't need to update anything in the
4462 	 * log. That will be done later during the rename by btrfs_log_new_name().
4463 	 * Besides that, doing it here would only cause extra unnecessary btree
4464 	 * operations on the log tree, increasing latency for applications.
4465 	 */
4466 	if (!rename_ctx) {
4467 		btrfs_del_inode_ref_in_log(trans, name, inode, dir);
4468 		btrfs_del_dir_entries_in_log(trans, name, dir, index);
4469 	}
4470 
4471 	/*
4472 	 * If we have a pending delayed iput we could end up with the final iput
4473 	 * being run in btrfs-cleaner context.  If we have enough of these built
4474 	 * up we can end up burning a lot of time in btrfs-cleaner without any
4475 	 * way to throttle the unlinks.  Since we're currently holding a ref on
4476 	 * the inode we can run the delayed iput here without any issues as the
4477 	 * final iput won't be done until after we drop the ref we're currently
4478 	 * holding.
4479 	 */
4480 	btrfs_run_delayed_iput(fs_info, inode);
4481 
4482 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
4483 	inode_inc_iversion(&inode->vfs_inode);
4484 	inode_set_ctime_current(&inode->vfs_inode);
4485 	inode_inc_iversion(&dir->vfs_inode);
4486 	update_time_after_link_or_unlink(dir);
4487 
4488 	return btrfs_update_inode(trans, dir);
4489 }
4490 
btrfs_unlink_inode(struct btrfs_trans_handle * trans,struct btrfs_inode * dir,struct btrfs_inode * inode,const struct fscrypt_str * name)4491 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4492 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
4493 		       const struct fscrypt_str *name)
4494 {
4495 	int ret;
4496 
4497 	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
4498 	if (!ret) {
4499 		drop_nlink(&inode->vfs_inode);
4500 		ret = btrfs_update_inode(trans, inode);
4501 	}
4502 	return ret;
4503 }
4504 
4505 /*
4506  * helper to start transaction for unlink and rmdir.
4507  *
4508  * unlink and rmdir are special in btrfs, they do not always free space, so
4509  * if we cannot make our reservations the normal way try and see if there is
4510  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4511  * allow the unlink to occur.
4512  */
__unlink_start_trans(struct btrfs_inode * dir)4513 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
4514 {
4515 	struct btrfs_root *root = dir->root;
4516 
4517 	return btrfs_start_transaction_fallback_global_rsv(root,
4518 						   BTRFS_UNLINK_METADATA_UNITS);
4519 }
4520 
btrfs_unlink(struct inode * dir,struct dentry * dentry)4521 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4522 {
4523 	struct btrfs_trans_handle *trans;
4524 	struct inode *inode = d_inode(dentry);
4525 	int ret;
4526 	struct fscrypt_name fname;
4527 
4528 	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4529 	if (ret)
4530 		return ret;
4531 
4532 	/* This needs to handle no-key deletions later on */
4533 
4534 	trans = __unlink_start_trans(BTRFS_I(dir));
4535 	if (IS_ERR(trans)) {
4536 		ret = PTR_ERR(trans);
4537 		goto fscrypt_free;
4538 	}
4539 
4540 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4541 				false);
4542 
4543 	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4544 				 &fname.disk_name);
4545 	if (ret)
4546 		goto end_trans;
4547 
4548 	if (inode->i_nlink == 0) {
4549 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4550 		if (ret)
4551 			goto end_trans;
4552 	}
4553 
4554 end_trans:
4555 	btrfs_end_transaction(trans);
4556 	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4557 fscrypt_free:
4558 	fscrypt_free_filename(&fname);
4559 	return ret;
4560 }
4561 
btrfs_unlink_subvol(struct btrfs_trans_handle * trans,struct btrfs_inode * dir,struct dentry * dentry)4562 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4563 			       struct btrfs_inode *dir, struct dentry *dentry)
4564 {
4565 	struct btrfs_root *root = dir->root;
4566 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4567 	BTRFS_PATH_AUTO_FREE(path);
4568 	struct extent_buffer *leaf;
4569 	struct btrfs_dir_item *di;
4570 	struct btrfs_key key;
4571 	u64 index;
4572 	int ret;
4573 	u64 objectid;
4574 	u64 dir_ino = btrfs_ino(dir);
4575 	struct fscrypt_name fname;
4576 
4577 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
4578 	if (ret)
4579 		return ret;
4580 
4581 	/* This needs to handle no-key deletions later on */
4582 
4583 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4584 		objectid = btrfs_root_id(inode->root);
4585 	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4586 		objectid = inode->ref_root_id;
4587 	} else {
4588 		WARN_ON(1);
4589 		fscrypt_free_filename(&fname);
4590 		return -EINVAL;
4591 	}
4592 
4593 	path = btrfs_alloc_path();
4594 	if (!path) {
4595 		ret = -ENOMEM;
4596 		goto out;
4597 	}
4598 
4599 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4600 				   &fname.disk_name, -1);
4601 	if (IS_ERR_OR_NULL(di)) {
4602 		ret = di ? PTR_ERR(di) : -ENOENT;
4603 		goto out;
4604 	}
4605 
4606 	leaf = path->nodes[0];
4607 	btrfs_dir_item_key_to_cpu(leaf, di, &key);
4608 	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4609 	ret = btrfs_delete_one_dir_name(trans, root, path, di);
4610 	if (unlikely(ret)) {
4611 		btrfs_abort_transaction(trans, ret);
4612 		goto out;
4613 	}
4614 	btrfs_release_path(path);
4615 
4616 	/*
4617 	 * This is a placeholder inode for a subvolume we didn't have a
4618 	 * reference to at the time of the snapshot creation.  In the meantime
4619 	 * we could have renamed the real subvol link into our snapshot, so
4620 	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4621 	 * Instead simply lookup the dir_index_item for this entry so we can
4622 	 * remove it.  Otherwise we know we have a ref to the root and we can
4623 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4624 	 */
4625 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4626 		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
4627 		if (IS_ERR(di)) {
4628 			ret = PTR_ERR(di);
4629 			btrfs_abort_transaction(trans, ret);
4630 			goto out;
4631 		}
4632 
4633 		leaf = path->nodes[0];
4634 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4635 		index = key.offset;
4636 		btrfs_release_path(path);
4637 	} else {
4638 		ret = btrfs_del_root_ref(trans, objectid,
4639 					 btrfs_root_id(root), dir_ino,
4640 					 &index, &fname.disk_name);
4641 		if (unlikely(ret)) {
4642 			btrfs_abort_transaction(trans, ret);
4643 			goto out;
4644 		}
4645 	}
4646 
4647 	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4648 	if (unlikely(ret)) {
4649 		btrfs_abort_transaction(trans, ret);
4650 		goto out;
4651 	}
4652 
4653 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
4654 	inode_inc_iversion(&dir->vfs_inode);
4655 	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4656 	ret = btrfs_update_inode_fallback(trans, dir);
4657 	if (ret)
4658 		btrfs_abort_transaction(trans, ret);
4659 out:
4660 	fscrypt_free_filename(&fname);
4661 	return ret;
4662 }
4663 
4664 /*
4665  * Helper to check if the subvolume references other subvolumes or if it's
4666  * default.
4667  */
may_destroy_subvol(struct btrfs_root * root)4668 static noinline int may_destroy_subvol(struct btrfs_root *root)
4669 {
4670 	struct btrfs_fs_info *fs_info = root->fs_info;
4671 	BTRFS_PATH_AUTO_FREE(path);
4672 	struct btrfs_dir_item *di;
4673 	struct btrfs_key key;
4674 	struct fscrypt_str name = FSTR_INIT("default", 7);
4675 	u64 dir_id;
4676 	int ret;
4677 
4678 	path = btrfs_alloc_path();
4679 	if (!path)
4680 		return -ENOMEM;
4681 
4682 	/* Make sure this root isn't set as the default subvol */
4683 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
4684 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4685 				   dir_id, &name, 0);
4686 	if (di && !IS_ERR(di)) {
4687 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4688 		if (key.objectid == btrfs_root_id(root)) {
4689 			ret = -EPERM;
4690 			btrfs_err(fs_info,
4691 				  "deleting default subvolume %llu is not allowed",
4692 				  key.objectid);
4693 			return ret;
4694 		}
4695 		btrfs_release_path(path);
4696 	}
4697 
4698 	key.objectid = btrfs_root_id(root);
4699 	key.type = BTRFS_ROOT_REF_KEY;
4700 	key.offset = (u64)-1;
4701 
4702 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4703 	if (ret < 0)
4704 		return ret;
4705 	if (unlikely(ret == 0)) {
4706 		/*
4707 		 * Key with offset -1 found, there would have to exist a root
4708 		 * with such id, but this is out of valid range.
4709 		 */
4710 		return -EUCLEAN;
4711 	}
4712 
4713 	ret = 0;
4714 	if (path->slots[0] > 0) {
4715 		path->slots[0]--;
4716 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4717 		if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
4718 			ret = -ENOTEMPTY;
4719 	}
4720 
4721 	return ret;
4722 }
4723 
4724 /* Delete all dentries for inodes belonging to the root */
btrfs_prune_dentries(struct btrfs_root * root)4725 static void btrfs_prune_dentries(struct btrfs_root *root)
4726 {
4727 	struct btrfs_fs_info *fs_info = root->fs_info;
4728 	struct btrfs_inode *inode;
4729 	u64 min_ino = 0;
4730 
4731 	if (!BTRFS_FS_ERROR(fs_info))
4732 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4733 
4734 	inode = btrfs_find_first_inode(root, min_ino);
4735 	while (inode) {
4736 		if (icount_read(&inode->vfs_inode) > 1)
4737 			d_prune_aliases(&inode->vfs_inode);
4738 
4739 		min_ino = btrfs_ino(inode) + 1;
4740 		/*
4741 		 * btrfs_drop_inode() will have it removed from the inode
4742 		 * cache when its usage count hits zero.
4743 		 */
4744 		iput(&inode->vfs_inode);
4745 		cond_resched();
4746 		inode = btrfs_find_first_inode(root, min_ino);
4747 	}
4748 }
4749 
btrfs_delete_subvolume(struct btrfs_inode * dir,struct dentry * dentry)4750 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
4751 {
4752 	struct btrfs_root *root = dir->root;
4753 	struct btrfs_fs_info *fs_info = root->fs_info;
4754 	struct inode *inode = d_inode(dentry);
4755 	struct btrfs_root *dest = BTRFS_I(inode)->root;
4756 	struct btrfs_trans_handle *trans;
4757 	struct btrfs_block_rsv block_rsv;
4758 	u64 root_flags;
4759 	u64 qgroup_reserved = 0;
4760 	int ret;
4761 
4762 	down_write(&fs_info->subvol_sem);
4763 
4764 	/*
4765 	 * Don't allow to delete a subvolume with send in progress. This is
4766 	 * inside the inode lock so the error handling that has to drop the bit
4767 	 * again is not run concurrently.
4768 	 */
4769 	spin_lock(&dest->root_item_lock);
4770 	if (dest->send_in_progress) {
4771 		spin_unlock(&dest->root_item_lock);
4772 		btrfs_warn(fs_info,
4773 			   "attempt to delete subvolume %llu during send",
4774 			   btrfs_root_id(dest));
4775 		ret = -EPERM;
4776 		goto out_up_write;
4777 	}
4778 	if (atomic_read(&dest->nr_swapfiles)) {
4779 		spin_unlock(&dest->root_item_lock);
4780 		btrfs_warn(fs_info,
4781 			   "attempt to delete subvolume %llu with active swapfile",
4782 			   btrfs_root_id(dest));
4783 		ret = -EPERM;
4784 		goto out_up_write;
4785 	}
4786 	root_flags = btrfs_root_flags(&dest->root_item);
4787 	btrfs_set_root_flags(&dest->root_item,
4788 			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4789 	spin_unlock(&dest->root_item_lock);
4790 
4791 	ret = may_destroy_subvol(dest);
4792 	if (ret)
4793 		goto out_undead;
4794 
4795 	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4796 	/*
4797 	 * One for dir inode,
4798 	 * two for dir entries,
4799 	 * two for root ref/backref.
4800 	 */
4801 	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4802 	if (ret)
4803 		goto out_undead;
4804 	qgroup_reserved = block_rsv.qgroup_rsv_reserved;
4805 
4806 	trans = btrfs_start_transaction(root, 0);
4807 	if (IS_ERR(trans)) {
4808 		ret = PTR_ERR(trans);
4809 		goto out_release;
4810 	}
4811 	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
4812 	qgroup_reserved = 0;
4813 	trans->block_rsv = &block_rsv;
4814 	trans->bytes_reserved = block_rsv.size;
4815 
4816 	btrfs_record_snapshot_destroy(trans, dir);
4817 
4818 	ret = btrfs_unlink_subvol(trans, dir, dentry);
4819 	if (unlikely(ret)) {
4820 		btrfs_abort_transaction(trans, ret);
4821 		goto out_end_trans;
4822 	}
4823 
4824 	ret = btrfs_record_root_in_trans(trans, dest);
4825 	if (unlikely(ret)) {
4826 		btrfs_abort_transaction(trans, ret);
4827 		goto out_end_trans;
4828 	}
4829 
4830 	memset(&dest->root_item.drop_progress, 0,
4831 		sizeof(dest->root_item.drop_progress));
4832 	btrfs_set_root_drop_level(&dest->root_item, 0);
4833 	btrfs_set_root_refs(&dest->root_item, 0);
4834 
4835 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4836 		ret = btrfs_insert_orphan_item(trans,
4837 					fs_info->tree_root,
4838 					btrfs_root_id(dest));
4839 		if (unlikely(ret)) {
4840 			btrfs_abort_transaction(trans, ret);
4841 			goto out_end_trans;
4842 		}
4843 	}
4844 
4845 	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4846 				     BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
4847 	if (unlikely(ret && ret != -ENOENT)) {
4848 		btrfs_abort_transaction(trans, ret);
4849 		goto out_end_trans;
4850 	}
4851 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4852 		ret = btrfs_uuid_tree_remove(trans,
4853 					  dest->root_item.received_uuid,
4854 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4855 					  btrfs_root_id(dest));
4856 		if (unlikely(ret && ret != -ENOENT)) {
4857 			btrfs_abort_transaction(trans, ret);
4858 			goto out_end_trans;
4859 		}
4860 	}
4861 
4862 	free_anon_bdev(dest->anon_dev);
4863 	dest->anon_dev = 0;
4864 out_end_trans:
4865 	trans->block_rsv = NULL;
4866 	trans->bytes_reserved = 0;
4867 	ret = btrfs_end_transaction(trans);
4868 	inode->i_flags |= S_DEAD;
4869 out_release:
4870 	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
4871 	if (qgroup_reserved)
4872 		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
4873 out_undead:
4874 	if (ret) {
4875 		spin_lock(&dest->root_item_lock);
4876 		root_flags = btrfs_root_flags(&dest->root_item);
4877 		btrfs_set_root_flags(&dest->root_item,
4878 				root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4879 		spin_unlock(&dest->root_item_lock);
4880 	}
4881 out_up_write:
4882 	up_write(&fs_info->subvol_sem);
4883 	if (!ret) {
4884 		d_invalidate(dentry);
4885 		btrfs_prune_dentries(dest);
4886 		ASSERT(dest->send_in_progress == 0);
4887 	}
4888 
4889 	return ret;
4890 }
4891 
btrfs_rmdir(struct inode * vfs_dir,struct dentry * dentry)4892 static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry)
4893 {
4894 	struct btrfs_inode *dir = BTRFS_I(vfs_dir);
4895 	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4896 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
4897 	int ret = 0;
4898 	struct btrfs_trans_handle *trans;
4899 	struct fscrypt_name fname;
4900 
4901 	if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE)
4902 		return -ENOTEMPTY;
4903 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4904 		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4905 			btrfs_err(fs_info,
4906 			"extent tree v2 doesn't support snapshot deletion yet");
4907 			return -EOPNOTSUPP;
4908 		}
4909 		return btrfs_delete_subvolume(dir, dentry);
4910 	}
4911 
4912 	ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, &fname);
4913 	if (ret)
4914 		return ret;
4915 
4916 	/* This needs to handle no-key deletions later on */
4917 
4918 	trans = __unlink_start_trans(dir);
4919 	if (IS_ERR(trans)) {
4920 		ret = PTR_ERR(trans);
4921 		goto out_notrans;
4922 	}
4923 
4924 	/*
4925 	 * Propagate the last_unlink_trans value of the deleted dir to its
4926 	 * parent directory. This is to prevent an unrecoverable log tree in the
4927 	 * case we do something like this:
4928 	 * 1) create dir foo
4929 	 * 2) create snapshot under dir foo
4930 	 * 3) delete the snapshot
4931 	 * 4) rmdir foo
4932 	 * 5) mkdir foo
4933 	 * 6) fsync foo or some file inside foo
4934 	 *
4935 	 * This is because we can't unlink other roots when replaying the dir
4936 	 * deletes for directory foo.
4937 	 */
4938 	if (inode->last_unlink_trans >= trans->transid)
4939 		btrfs_record_snapshot_destroy(trans, dir);
4940 
4941 	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4942 		ret = btrfs_unlink_subvol(trans, dir, dentry);
4943 		goto out;
4944 	}
4945 
4946 	ret = btrfs_orphan_add(trans, inode);
4947 	if (ret)
4948 		goto out;
4949 
4950 	/* now the directory is empty */
4951 	ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name);
4952 	if (!ret)
4953 		btrfs_i_size_write(inode, 0);
4954 out:
4955 	btrfs_end_transaction(trans);
4956 out_notrans:
4957 	btrfs_btree_balance_dirty(fs_info);
4958 	fscrypt_free_filename(&fname);
4959 
4960 	return ret;
4961 }
4962 
is_inside_block(u64 bytenr,u64 blockstart,u32 blocksize)4963 static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize)
4964 {
4965 	ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u",
4966 		blockstart, blocksize);
4967 
4968 	if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1)
4969 		return true;
4970 	return false;
4971 }
4972 
truncate_block_zero_beyond_eof(struct btrfs_inode * inode,u64 start)4973 static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
4974 {
4975 	const pgoff_t index = (start >> PAGE_SHIFT);
4976 	struct address_space *mapping = inode->vfs_inode.i_mapping;
4977 	struct folio *folio;
4978 	u64 zero_start;
4979 	u64 zero_end;
4980 	int ret = 0;
4981 
4982 again:
4983 	folio = filemap_lock_folio(mapping, index);
4984 	/* No folio present. */
4985 	if (IS_ERR(folio))
4986 		return 0;
4987 
4988 	if (!folio_test_uptodate(folio)) {
4989 		ret = btrfs_read_folio(NULL, folio);
4990 		folio_lock(folio);
4991 		if (folio->mapping != mapping) {
4992 			folio_unlock(folio);
4993 			folio_put(folio);
4994 			goto again;
4995 		}
4996 		if (unlikely(!folio_test_uptodate(folio))) {
4997 			ret = -EIO;
4998 			goto out_unlock;
4999 		}
5000 	}
5001 	folio_wait_writeback(folio);
5002 
5003 	/*
5004 	 * We do not need to lock extents nor wait for OE, as it's already
5005 	 * beyond EOF.
5006 	 */
5007 
5008 	zero_start = max_t(u64, folio_pos(folio), start);
5009 	zero_end = folio_next_pos(folio);
5010 	folio_zero_range(folio, zero_start - folio_pos(folio),
5011 			 zero_end - zero_start);
5012 
5013 out_unlock:
5014 	folio_unlock(folio);
5015 	folio_put(folio);
5016 	return ret;
5017 }
5018 
5019 /*
5020  * Handle the truncation of a fs block.
5021  *
5022  * @inode  - inode that we're zeroing
5023  * @offset - the file offset of the block to truncate
5024  *           The value must be inside [@start, @end], and the function will do
5025  *           extra checks if the block that covers @offset needs to be zeroed.
5026  * @start  - the start file offset of the range we want to zero
5027  * @end    - the end (inclusive) file offset of the range we want to zero.
5028  *
5029  * If the range is not block aligned, read out the folio that covers @offset,
5030  * and if needed zero blocks that are inside the folio and covered by [@start, @end).
5031  * If @start or @end + 1 lands inside a block, that block will be marked dirty
5032  * for writeback.
5033  *
5034  * This is utilized by hole punch, zero range, file expansion.
5035  */
btrfs_truncate_block(struct btrfs_inode * inode,u64 offset,u64 start,u64 end)5036 int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end)
5037 {
5038 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
5039 	struct address_space *mapping = inode->vfs_inode.i_mapping;
5040 	struct extent_io_tree *io_tree = &inode->io_tree;
5041 	struct btrfs_ordered_extent *ordered;
5042 	struct extent_state *cached_state = NULL;
5043 	struct extent_changeset *data_reserved = NULL;
5044 	bool only_release_metadata = false;
5045 	u32 blocksize = fs_info->sectorsize;
5046 	pgoff_t index = (offset >> PAGE_SHIFT);
5047 	struct folio *folio;
5048 	gfp_t mask = btrfs_alloc_write_mask(mapping);
5049 	int ret = 0;
5050 	const bool in_head_block = is_inside_block(offset, round_down(start, blocksize),
5051 						   blocksize);
5052 	const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize),
5053 						   blocksize);
5054 	bool need_truncate_head = false;
5055 	bool need_truncate_tail = false;
5056 	u64 zero_start;
5057 	u64 zero_end;
5058 	u64 block_start;
5059 	u64 block_end;
5060 
5061 	/* @offset should be inside the range. */
5062 	ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu",
5063 	       offset, start, end);
5064 
5065 	/* The range is aligned at both ends. */
5066 	if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) {
5067 		/*
5068 		 * For block size < page size case, we may have polluted blocks
5069 		 * beyond EOF. So we also need to zero them out.
5070 		 */
5071 		if (end == (u64)-1 && blocksize < PAGE_SIZE)
5072 			ret = truncate_block_zero_beyond_eof(inode, start);
5073 		goto out;
5074 	}
5075 
5076 	/*
5077 	 * @offset may not be inside the head nor tail block. In that case we
5078 	 * don't need to do anything.
5079 	 */
5080 	if (!in_head_block && !in_tail_block)
5081 		goto out;
5082 
5083 	/*
5084 	 * Skip the truncation if the range in the target block is already aligned.
5085 	 * The seemingly complex check will also handle the same block case.
5086 	 */
5087 	if (in_head_block && !IS_ALIGNED(start, blocksize))
5088 		need_truncate_head = true;
5089 	if (in_tail_block && !IS_ALIGNED(end + 1, blocksize))
5090 		need_truncate_tail = true;
5091 	if (!need_truncate_head && !need_truncate_tail)
5092 		goto out;
5093 
5094 	block_start = round_down(offset, blocksize);
5095 	block_end = block_start + blocksize - 1;
5096 
5097 	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
5098 					  blocksize, false);
5099 	if (ret < 0) {
5100 		size_t write_bytes = blocksize;
5101 
5102 		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
5103 			/* For nocow case, no need to reserve data space. */
5104 			ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u",
5105 			       write_bytes, blocksize);
5106 			only_release_metadata = true;
5107 		} else {
5108 			goto out;
5109 		}
5110 	}
5111 	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
5112 	if (ret < 0) {
5113 		if (!only_release_metadata)
5114 			btrfs_free_reserved_data_space(inode, data_reserved,
5115 						       block_start, blocksize);
5116 		goto out;
5117 	}
5118 again:
5119 	folio = __filemap_get_folio(mapping, index,
5120 				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
5121 	if (IS_ERR(folio)) {
5122 		if (only_release_metadata)
5123 			btrfs_delalloc_release_metadata(inode, blocksize, true);
5124 		else
5125 			btrfs_delalloc_release_space(inode, data_reserved,
5126 						     block_start, blocksize, true);
5127 		btrfs_delalloc_release_extents(inode, blocksize);
5128 		ret = PTR_ERR(folio);
5129 		goto out;
5130 	}
5131 
5132 	if (!folio_test_uptodate(folio)) {
5133 		ret = btrfs_read_folio(NULL, folio);
5134 		folio_lock(folio);
5135 		if (folio->mapping != mapping) {
5136 			folio_unlock(folio);
5137 			folio_put(folio);
5138 			goto again;
5139 		}
5140 		if (unlikely(!folio_test_uptodate(folio))) {
5141 			ret = -EIO;
5142 			goto out_unlock;
5143 		}
5144 	}
5145 
5146 	/*
5147 	 * We unlock the page after the io is completed and then re-lock it
5148 	 * above.  release_folio() could have come in between that and cleared
5149 	 * folio private, but left the page in the mapping.  Set the page mapped
5150 	 * here to make sure it's properly set for the subpage stuff.
5151 	 */
5152 	ret = set_folio_extent_mapped(folio);
5153 	if (ret < 0)
5154 		goto out_unlock;
5155 
5156 	folio_wait_writeback(folio);
5157 
5158 	btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
5159 
5160 	ordered = btrfs_lookup_ordered_extent(inode, block_start);
5161 	if (ordered) {
5162 		btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5163 		folio_unlock(folio);
5164 		folio_put(folio);
5165 		btrfs_start_ordered_extent(ordered);
5166 		btrfs_put_ordered_extent(ordered);
5167 		goto again;
5168 	}
5169 
5170 	btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end,
5171 			       EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
5172 			       &cached_state);
5173 
5174 	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
5175 					&cached_state);
5176 	if (ret) {
5177 		btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5178 		goto out_unlock;
5179 	}
5180 
5181 	if (end == (u64)-1) {
5182 		/*
5183 		 * We're truncating beyond EOF, the remaining blocks normally are
5184 		 * already holes thus no need to zero again, but it's possible for
5185 		 * fs block size < page size cases to have memory mapped writes
5186 		 * to pollute ranges beyond EOF.
5187 		 *
5188 		 * In that case although such polluted blocks beyond EOF will
5189 		 * not reach disk, it still affects our page caches.
5190 		 */
5191 		zero_start = max_t(u64, folio_pos(folio), start);
5192 		zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
5193 	} else {
5194 		zero_start = max_t(u64, block_start, start);
5195 		zero_end = min_t(u64, block_end, end);
5196 	}
5197 	folio_zero_range(folio, zero_start - folio_pos(folio),
5198 			 zero_end - zero_start + 1);
5199 
5200 	btrfs_folio_clear_checked(fs_info, folio, block_start,
5201 				  block_end + 1 - block_start);
5202 	btrfs_folio_set_dirty(fs_info, folio, block_start,
5203 			      block_end + 1 - block_start);
5204 
5205 	if (only_release_metadata)
5206 		btrfs_set_extent_bit(&inode->io_tree, block_start, block_end,
5207 				     EXTENT_NORESERVE, &cached_state);
5208 
5209 	btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5210 
5211 out_unlock:
5212 	if (ret) {
5213 		if (only_release_metadata)
5214 			btrfs_delalloc_release_metadata(inode, blocksize, true);
5215 		else
5216 			btrfs_delalloc_release_space(inode, data_reserved,
5217 					block_start, blocksize, true);
5218 	}
5219 	btrfs_delalloc_release_extents(inode, blocksize);
5220 	folio_unlock(folio);
5221 	folio_put(folio);
5222 out:
5223 	if (only_release_metadata)
5224 		btrfs_check_nocow_unlock(inode);
5225 	extent_changeset_free(data_reserved);
5226 	return ret;
5227 }
5228 
maybe_insert_hole(struct btrfs_inode * inode,u64 offset,u64 len)5229 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
5230 {
5231 	struct btrfs_root *root = inode->root;
5232 	struct btrfs_fs_info *fs_info = root->fs_info;
5233 	struct btrfs_trans_handle *trans;
5234 	struct btrfs_drop_extents_args drop_args = { 0 };
5235 	int ret;
5236 
5237 	/*
5238 	 * If NO_HOLES is enabled, we don't need to do anything.
5239 	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
5240 	 * or btrfs_update_inode() will be called, which guarantee that the next
5241 	 * fsync will know this inode was changed and needs to be logged.
5242 	 */
5243 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
5244 		return 0;
5245 
5246 	/*
5247 	 * 1 - for the one we're dropping
5248 	 * 1 - for the one we're adding
5249 	 * 1 - for updating the inode.
5250 	 */
5251 	trans = btrfs_start_transaction(root, 3);
5252 	if (IS_ERR(trans))
5253 		return PTR_ERR(trans);
5254 
5255 	drop_args.start = offset;
5256 	drop_args.end = offset + len;
5257 	drop_args.drop_cache = true;
5258 
5259 	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
5260 	if (unlikely(ret)) {
5261 		btrfs_abort_transaction(trans, ret);
5262 		btrfs_end_transaction(trans);
5263 		return ret;
5264 	}
5265 
5266 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
5267 	if (ret) {
5268 		btrfs_abort_transaction(trans, ret);
5269 	} else {
5270 		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
5271 		btrfs_update_inode(trans, inode);
5272 	}
5273 	btrfs_end_transaction(trans);
5274 	return ret;
5275 }
5276 
5277 /*
5278  * This function puts in dummy file extents for the area we're creating a hole
5279  * for.  So if we are truncating this file to a larger size we need to insert
5280  * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
5281  * the range between oldsize and size
5282  */
btrfs_cont_expand(struct btrfs_inode * inode,loff_t oldsize,loff_t size)5283 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
5284 {
5285 	struct btrfs_root *root = inode->root;
5286 	struct btrfs_fs_info *fs_info = root->fs_info;
5287 	struct extent_io_tree *io_tree = &inode->io_tree;
5288 	struct extent_map *em = NULL;
5289 	struct extent_state *cached_state = NULL;
5290 	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5291 	u64 block_end = ALIGN(size, fs_info->sectorsize);
5292 	u64 last_byte;
5293 	u64 cur_offset;
5294 	u64 hole_size;
5295 	int ret = 0;
5296 
5297 	/*
5298 	 * If our size started in the middle of a block we need to zero out the
5299 	 * rest of the block before we expand the i_size, otherwise we could
5300 	 * expose stale data.
5301 	 */
5302 	ret = btrfs_truncate_block(inode, oldsize, oldsize, -1);
5303 	if (ret)
5304 		return ret;
5305 
5306 	if (size <= hole_start)
5307 		return 0;
5308 
5309 	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
5310 					   &cached_state);
5311 	cur_offset = hole_start;
5312 	while (1) {
5313 		em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
5314 		if (IS_ERR(em)) {
5315 			ret = PTR_ERR(em);
5316 			em = NULL;
5317 			break;
5318 		}
5319 		last_byte = min(btrfs_extent_map_end(em), block_end);
5320 		last_byte = ALIGN(last_byte, fs_info->sectorsize);
5321 		hole_size = last_byte - cur_offset;
5322 
5323 		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
5324 			struct extent_map *hole_em;
5325 
5326 			ret = maybe_insert_hole(inode, cur_offset, hole_size);
5327 			if (ret)
5328 				break;
5329 
5330 			ret = btrfs_inode_set_file_extent_range(inode,
5331 							cur_offset, hole_size);
5332 			if (ret)
5333 				break;
5334 
5335 			hole_em = btrfs_alloc_extent_map();
5336 			if (!hole_em) {
5337 				btrfs_drop_extent_map_range(inode, cur_offset,
5338 						    cur_offset + hole_size - 1,
5339 						    false);
5340 				btrfs_set_inode_full_sync(inode);
5341 				goto next;
5342 			}
5343 			hole_em->start = cur_offset;
5344 			hole_em->len = hole_size;
5345 
5346 			hole_em->disk_bytenr = EXTENT_MAP_HOLE;
5347 			hole_em->disk_num_bytes = 0;
5348 			hole_em->ram_bytes = hole_size;
5349 			hole_em->generation = btrfs_get_fs_generation(fs_info);
5350 
5351 			ret = btrfs_replace_extent_map_range(inode, hole_em, true);
5352 			btrfs_free_extent_map(hole_em);
5353 		} else {
5354 			ret = btrfs_inode_set_file_extent_range(inode,
5355 							cur_offset, hole_size);
5356 			if (ret)
5357 				break;
5358 		}
5359 next:
5360 		btrfs_free_extent_map(em);
5361 		em = NULL;
5362 		cur_offset = last_byte;
5363 		if (cur_offset >= block_end)
5364 			break;
5365 	}
5366 	btrfs_free_extent_map(em);
5367 	btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
5368 	return ret;
5369 }
5370 
btrfs_setsize(struct inode * inode,struct iattr * attr)5371 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5372 {
5373 	struct btrfs_root *root = BTRFS_I(inode)->root;
5374 	struct btrfs_trans_handle *trans;
5375 	loff_t oldsize = i_size_read(inode);
5376 	loff_t newsize = attr->ia_size;
5377 	int mask = attr->ia_valid;
5378 	int ret;
5379 
5380 	/*
5381 	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5382 	 * special case where we need to update the times despite not having
5383 	 * these flags set.  For all other operations the VFS set these flags
5384 	 * explicitly if it wants a timestamp update.
5385 	 */
5386 	if (newsize != oldsize) {
5387 		inode_inc_iversion(inode);
5388 		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
5389 			inode_set_mtime_to_ts(inode,
5390 					      inode_set_ctime_current(inode));
5391 		}
5392 	}
5393 
5394 	if (newsize > oldsize) {
5395 		/*
5396 		 * Don't do an expanding truncate while snapshotting is ongoing.
5397 		 * This is to ensure the snapshot captures a fully consistent
5398 		 * state of this file - if the snapshot captures this expanding
5399 		 * truncation, it must capture all writes that happened before
5400 		 * this truncation.
5401 		 */
5402 		btrfs_drew_write_lock(&root->snapshot_lock);
5403 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5404 		if (ret) {
5405 			btrfs_drew_write_unlock(&root->snapshot_lock);
5406 			return ret;
5407 		}
5408 
5409 		trans = btrfs_start_transaction(root, 1);
5410 		if (IS_ERR(trans)) {
5411 			btrfs_drew_write_unlock(&root->snapshot_lock);
5412 			return PTR_ERR(trans);
5413 		}
5414 
5415 		i_size_write(inode, newsize);
5416 		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5417 		pagecache_isize_extended(inode, oldsize, newsize);
5418 		ret = btrfs_update_inode(trans, BTRFS_I(inode));
5419 		btrfs_drew_write_unlock(&root->snapshot_lock);
5420 		btrfs_end_transaction(trans);
5421 	} else {
5422 		struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
5423 
5424 		if (btrfs_is_zoned(fs_info)) {
5425 			ret = btrfs_wait_ordered_range(BTRFS_I(inode),
5426 					ALIGN(newsize, fs_info->sectorsize),
5427 					(u64)-1);
5428 			if (ret)
5429 				return ret;
5430 		}
5431 
5432 		/*
5433 		 * We're truncating a file that used to have good data down to
5434 		 * zero. Make sure any new writes to the file get on disk
5435 		 * on close.
5436 		 */
5437 		if (newsize == 0)
5438 			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5439 				&BTRFS_I(inode)->runtime_flags);
5440 
5441 		truncate_setsize(inode, newsize);
5442 
5443 		inode_dio_wait(inode);
5444 
5445 		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
5446 		if (ret && inode->i_nlink) {
5447 			int ret2;
5448 
5449 			/*
5450 			 * Truncate failed, so fix up the in-memory size. We
5451 			 * adjusted disk_i_size down as we removed extents, so
5452 			 * wait for disk_i_size to be stable and then update the
5453 			 * in-memory size to match.
5454 			 */
5455 			ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
5456 			if (ret2)
5457 				return ret2;
5458 			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5459 		}
5460 	}
5461 
5462 	return ret;
5463 }
5464 
btrfs_setattr(struct mnt_idmap * idmap,struct dentry * dentry,struct iattr * attr)5465 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5466 			 struct iattr *attr)
5467 {
5468 	struct inode *inode = d_inode(dentry);
5469 	struct btrfs_root *root = BTRFS_I(inode)->root;
5470 	int ret;
5471 
5472 	if (btrfs_root_readonly(root))
5473 		return -EROFS;
5474 
5475 	ret = setattr_prepare(idmap, dentry, attr);
5476 	if (ret)
5477 		return ret;
5478 
5479 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5480 		ret = btrfs_setsize(inode, attr);
5481 		if (ret)
5482 			return ret;
5483 	}
5484 
5485 	if (attr->ia_valid) {
5486 		setattr_copy(idmap, inode, attr);
5487 		inode_inc_iversion(inode);
5488 		ret = btrfs_dirty_inode(BTRFS_I(inode));
5489 
5490 		if (!ret && attr->ia_valid & ATTR_MODE)
5491 			ret = posix_acl_chmod(idmap, dentry, inode->i_mode);
5492 	}
5493 
5494 	return ret;
5495 }
5496 
5497 /*
5498  * While truncating the inode pages during eviction, we get the VFS
5499  * calling btrfs_invalidate_folio() against each folio of the inode. This
5500  * is slow because the calls to btrfs_invalidate_folio() result in a
5501  * huge amount of calls to lock_extent() and clear_extent_bit(),
5502  * which keep merging and splitting extent_state structures over and over,
5503  * wasting lots of time.
5504  *
5505  * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5506  * skip all those expensive operations on a per folio basis and do only
5507  * the ordered io finishing, while we release here the extent_map and
5508  * extent_state structures, without the excessive merging and splitting.
5509  */
evict_inode_truncate_pages(struct inode * inode)5510 static void evict_inode_truncate_pages(struct inode *inode)
5511 {
5512 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5513 	struct rb_node *node;
5514 
5515 	ASSERT(inode_state_read_once(inode) & I_FREEING);
5516 	truncate_inode_pages_final(&inode->i_data);
5517 
5518 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5519 
5520 	/*
5521 	 * Keep looping until we have no more ranges in the io tree.
5522 	 * We can have ongoing bios started by readahead that have
5523 	 * their endio callback (extent_io.c:end_bio_extent_readpage)
5524 	 * still in progress (unlocked the pages in the bio but did not yet
5525 	 * unlocked the ranges in the io tree). Therefore this means some
5526 	 * ranges can still be locked and eviction started because before
5527 	 * submitting those bios, which are executed by a separate task (work
5528 	 * queue kthread), inode references (inode->i_count) were not taken
5529 	 * (which would be dropped in the end io callback of each bio).
5530 	 * Therefore here we effectively end up waiting for those bios and
5531 	 * anyone else holding locked ranges without having bumped the inode's
5532 	 * reference count - if we don't do it, when they access the inode's
5533 	 * io_tree to unlock a range it may be too late, leading to an
5534 	 * use-after-free issue.
5535 	 */
5536 	spin_lock(&io_tree->lock);
5537 	while (!RB_EMPTY_ROOT(&io_tree->state)) {
5538 		struct extent_state *state;
5539 		struct extent_state *cached_state = NULL;
5540 		u64 start;
5541 		u64 end;
5542 		unsigned state_flags;
5543 
5544 		node = rb_first(&io_tree->state);
5545 		state = rb_entry(node, struct extent_state, rb_node);
5546 		start = state->start;
5547 		end = state->end;
5548 		state_flags = state->state;
5549 		spin_unlock(&io_tree->lock);
5550 
5551 		btrfs_lock_extent(io_tree, start, end, &cached_state);
5552 
5553 		/*
5554 		 * If still has DELALLOC flag, the extent didn't reach disk,
5555 		 * and its reserved space won't be freed by delayed_ref.
5556 		 * So we need to free its reserved space here.
5557 		 * (Refer to comment in btrfs_invalidate_folio, case 2)
5558 		 *
5559 		 * Note, end is the bytenr of last byte, so we need + 1 here.
5560 		 */
5561 		if (state_flags & EXTENT_DELALLOC)
5562 			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5563 					       end - start + 1, NULL);
5564 
5565 		btrfs_clear_extent_bit(io_tree, start, end,
5566 				       EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5567 				       &cached_state);
5568 
5569 		cond_resched();
5570 		spin_lock(&io_tree->lock);
5571 	}
5572 	spin_unlock(&io_tree->lock);
5573 }
5574 
evict_refill_and_join(struct btrfs_root * root,struct btrfs_block_rsv * rsv)5575 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5576 							struct btrfs_block_rsv *rsv)
5577 {
5578 	struct btrfs_fs_info *fs_info = root->fs_info;
5579 	struct btrfs_trans_handle *trans;
5580 	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
5581 	int ret;
5582 
5583 	/*
5584 	 * Eviction should be taking place at some place safe because of our
5585 	 * delayed iputs.  However the normal flushing code will run delayed
5586 	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5587 	 *
5588 	 * We reserve the delayed_refs_extra here again because we can't use
5589 	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5590 	 * above.  We reserve our extra bit here because we generate a ton of
5591 	 * delayed refs activity by truncating.
5592 	 *
5593 	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5594 	 * if we fail to make this reservation we can re-try without the
5595 	 * delayed_refs_extra so we can make some forward progress.
5596 	 */
5597 	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5598 				     BTRFS_RESERVE_FLUSH_EVICT);
5599 	if (ret) {
5600 		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5601 					     BTRFS_RESERVE_FLUSH_EVICT);
5602 		if (ret) {
5603 			btrfs_warn(fs_info,
5604 				   "could not allocate space for delete; will truncate on mount");
5605 			return ERR_PTR(-ENOSPC);
5606 		}
5607 		delayed_refs_extra = 0;
5608 	}
5609 
5610 	trans = btrfs_join_transaction(root);
5611 	if (IS_ERR(trans))
5612 		return trans;
5613 
5614 	if (delayed_refs_extra) {
5615 		trans->block_rsv = &fs_info->trans_block_rsv;
5616 		trans->bytes_reserved = delayed_refs_extra;
5617 		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5618 					delayed_refs_extra, true);
5619 	}
5620 	return trans;
5621 }
5622 
btrfs_evict_inode(struct inode * inode)5623 void btrfs_evict_inode(struct inode *inode)
5624 {
5625 	struct btrfs_fs_info *fs_info;
5626 	struct btrfs_trans_handle *trans;
5627 	struct btrfs_root *root = BTRFS_I(inode)->root;
5628 	struct btrfs_block_rsv rsv;
5629 	int ret;
5630 
5631 	trace_btrfs_inode_evict(inode);
5632 
5633 	if (!root)
5634 		goto clear_inode;
5635 
5636 	fs_info = inode_to_fs_info(inode);
5637 	evict_inode_truncate_pages(inode);
5638 
5639 	if (inode->i_nlink &&
5640 	    ((btrfs_root_refs(&root->root_item) != 0 &&
5641 	      btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
5642 	     btrfs_is_free_space_inode(BTRFS_I(inode))))
5643 		goto out;
5644 
5645 	if (is_bad_inode(inode))
5646 		goto out;
5647 
5648 	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5649 		goto out;
5650 
5651 	if (inode->i_nlink > 0) {
5652 		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5653 		       btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
5654 		goto out;
5655 	}
5656 
5657 	/*
5658 	 * This makes sure the inode item in tree is uptodate and the space for
5659 	 * the inode update is released.
5660 	 */
5661 	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5662 	if (ret)
5663 		goto out;
5664 
5665 	/*
5666 	 * This drops any pending insert or delete operations we have for this
5667 	 * inode.  We could have a delayed dir index deletion queued up, but
5668 	 * we're removing the inode completely so that'll be taken care of in
5669 	 * the truncate.
5670 	 */
5671 	btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5672 
5673 	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
5674 	rsv.size = btrfs_calc_metadata_size(fs_info, 1);
5675 	rsv.failfast = true;
5676 
5677 	btrfs_i_size_write(BTRFS_I(inode), 0);
5678 
5679 	while (1) {
5680 		struct btrfs_truncate_control control = {
5681 			.inode = BTRFS_I(inode),
5682 			.ino = btrfs_ino(BTRFS_I(inode)),
5683 			.new_size = 0,
5684 			.min_type = 0,
5685 		};
5686 
5687 		trans = evict_refill_and_join(root, &rsv);
5688 		if (IS_ERR(trans))
5689 			goto out_release;
5690 
5691 		trans->block_rsv = &rsv;
5692 
5693 		ret = btrfs_truncate_inode_items(trans, root, &control);
5694 		trans->block_rsv = &fs_info->trans_block_rsv;
5695 		btrfs_end_transaction(trans);
5696 		/*
5697 		 * We have not added new delayed items for our inode after we
5698 		 * have flushed its delayed items, so no need to throttle on
5699 		 * delayed items. However we have modified extent buffers.
5700 		 */
5701 		btrfs_btree_balance_dirty_nodelay(fs_info);
5702 		if (ret && ret != -ENOSPC && ret != -EAGAIN)
5703 			goto out_release;
5704 		else if (!ret)
5705 			break;
5706 	}
5707 
5708 	/*
5709 	 * Errors here aren't a big deal, it just means we leave orphan items in
5710 	 * the tree. They will be cleaned up on the next mount. If the inode
5711 	 * number gets reused, cleanup deletes the orphan item without doing
5712 	 * anything, and unlink reuses the existing orphan item.
5713 	 *
5714 	 * If it turns out that we are dropping too many of these, we might want
5715 	 * to add a mechanism for retrying these after a commit.
5716 	 */
5717 	trans = evict_refill_and_join(root, &rsv);
5718 	if (!IS_ERR(trans)) {
5719 		trans->block_rsv = &rsv;
5720 		btrfs_orphan_del(trans, BTRFS_I(inode));
5721 		trans->block_rsv = &fs_info->trans_block_rsv;
5722 		btrfs_end_transaction(trans);
5723 	}
5724 
5725 out_release:
5726 	btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
5727 out:
5728 	/*
5729 	 * If we didn't successfully delete, the orphan item will still be in
5730 	 * the tree and we'll retry on the next mount. Again, we might also want
5731 	 * to retry these periodically in the future.
5732 	 */
5733 	btrfs_remove_delayed_node(BTRFS_I(inode));
5734 clear_inode:
5735 	clear_inode(inode);
5736 }
5737 
5738 /*
5739  * Return the key found in the dir entry in the location pointer, fill @type
5740  * with BTRFS_FT_*, and return 0.
5741  *
5742  * If no dir entries were found, returns -ENOENT.
5743  * If found a corrupted location in dir entry, returns -EUCLEAN.
5744  */
btrfs_inode_by_name(struct btrfs_inode * dir,struct dentry * dentry,struct btrfs_key * location,u8 * type)5745 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
5746 			       struct btrfs_key *location, u8 *type)
5747 {
5748 	struct btrfs_dir_item *di;
5749 	BTRFS_PATH_AUTO_FREE(path);
5750 	struct btrfs_root *root = dir->root;
5751 	int ret = 0;
5752 	struct fscrypt_name fname;
5753 
5754 	path = btrfs_alloc_path();
5755 	if (!path)
5756 		return -ENOMEM;
5757 
5758 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
5759 	if (ret < 0)
5760 		return ret;
5761 	/*
5762 	 * fscrypt_setup_filename() should never return a positive value, but
5763 	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
5764 	 */
5765 	ASSERT(ret == 0);
5766 
5767 	/* This needs to handle no-key deletions later on */
5768 
5769 	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
5770 				   &fname.disk_name, 0);
5771 	if (IS_ERR_OR_NULL(di)) {
5772 		ret = di ? PTR_ERR(di) : -ENOENT;
5773 		goto out;
5774 	}
5775 
5776 	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5777 	if (unlikely(location->type != BTRFS_INODE_ITEM_KEY &&
5778 		     location->type != BTRFS_ROOT_ITEM_KEY)) {
5779 		ret = -EUCLEAN;
5780 		btrfs_warn(root->fs_info,
5781 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location " BTRFS_KEY_FMT ")",
5782 			   __func__, fname.disk_name.name, btrfs_ino(dir),
5783 			   BTRFS_KEY_FMT_VALUE(location));
5784 	}
5785 	if (!ret)
5786 		*type = btrfs_dir_ftype(path->nodes[0], di);
5787 out:
5788 	fscrypt_free_filename(&fname);
5789 	return ret;
5790 }
5791 
5792 /*
5793  * when we hit a tree root in a directory, the btrfs part of the inode
5794  * needs to be changed to reflect the root directory of the tree root.  This
5795  * is kind of like crossing a mount point.
5796  */
fixup_tree_root_location(struct btrfs_fs_info * fs_info,struct btrfs_inode * dir,struct dentry * dentry,struct btrfs_key * location,struct btrfs_root ** sub_root)5797 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5798 				    struct btrfs_inode *dir,
5799 				    struct dentry *dentry,
5800 				    struct btrfs_key *location,
5801 				    struct btrfs_root **sub_root)
5802 {
5803 	BTRFS_PATH_AUTO_FREE(path);
5804 	struct btrfs_root *new_root;
5805 	struct btrfs_root_ref *ref;
5806 	struct extent_buffer *leaf;
5807 	struct btrfs_key key;
5808 	int ret;
5809 	int err = 0;
5810 	struct fscrypt_name fname;
5811 
5812 	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
5813 	if (ret)
5814 		return ret;
5815 
5816 	path = btrfs_alloc_path();
5817 	if (!path) {
5818 		err = -ENOMEM;
5819 		goto out;
5820 	}
5821 
5822 	err = -ENOENT;
5823 	key.objectid = btrfs_root_id(dir->root);
5824 	key.type = BTRFS_ROOT_REF_KEY;
5825 	key.offset = location->objectid;
5826 
5827 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5828 	if (ret) {
5829 		if (ret < 0)
5830 			err = ret;
5831 		goto out;
5832 	}
5833 
5834 	leaf = path->nodes[0];
5835 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5836 	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5837 	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
5838 		goto out;
5839 
5840 	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
5841 				   (unsigned long)(ref + 1), fname.disk_name.len);
5842 	if (ret)
5843 		goto out;
5844 
5845 	btrfs_release_path(path);
5846 
5847 	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5848 	if (IS_ERR(new_root)) {
5849 		err = PTR_ERR(new_root);
5850 		goto out;
5851 	}
5852 
5853 	*sub_root = new_root;
5854 	location->objectid = btrfs_root_dirid(&new_root->root_item);
5855 	location->type = BTRFS_INODE_ITEM_KEY;
5856 	location->offset = 0;
5857 	err = 0;
5858 out:
5859 	fscrypt_free_filename(&fname);
5860 	return err;
5861 }
5862 
5863 
5864 
btrfs_del_inode_from_root(struct btrfs_inode * inode)5865 static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
5866 {
5867 	struct btrfs_root *root = inode->root;
5868 	struct btrfs_inode *entry;
5869 	bool empty = false;
5870 
5871 	xa_lock(&root->inodes);
5872 	/*
5873 	 * This btrfs_inode is being freed and has already been unhashed at this
5874 	 * point. It's possible that another btrfs_inode has already been
5875 	 * allocated for the same inode and inserted itself into the root, so
5876 	 * don't delete it in that case.
5877 	 *
5878 	 * Note that this shouldn't need to allocate memory, so the gfp flags
5879 	 * don't really matter.
5880 	 */
5881 	entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
5882 			     GFP_ATOMIC);
5883 	if (entry == inode)
5884 		empty = xa_empty(&root->inodes);
5885 	xa_unlock(&root->inodes);
5886 
5887 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
5888 		xa_lock(&root->inodes);
5889 		empty = xa_empty(&root->inodes);
5890 		xa_unlock(&root->inodes);
5891 		if (empty)
5892 			btrfs_add_dead_root(root);
5893 	}
5894 }
5895 
5896 
btrfs_init_locked_inode(struct inode * inode,void * p)5897 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5898 {
5899 	struct btrfs_iget_args *args = p;
5900 
5901 	btrfs_set_inode_number(BTRFS_I(inode), args->ino);
5902 	BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5903 
5904 	if (args->root && args->root == args->root->fs_info->tree_root &&
5905 	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
5906 		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5907 			&BTRFS_I(inode)->runtime_flags);
5908 	return 0;
5909 }
5910 
btrfs_find_actor(struct inode * inode,void * opaque)5911 static int btrfs_find_actor(struct inode *inode, void *opaque)
5912 {
5913 	struct btrfs_iget_args *args = opaque;
5914 
5915 	return args->ino == btrfs_ino(BTRFS_I(inode)) &&
5916 		args->root == BTRFS_I(inode)->root;
5917 }
5918 
btrfs_iget_locked(u64 ino,struct btrfs_root * root)5919 static struct btrfs_inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
5920 {
5921 	struct inode *inode;
5922 	struct btrfs_iget_args args;
5923 	unsigned long hashval = btrfs_inode_hash(ino, root);
5924 
5925 	args.ino = ino;
5926 	args.root = root;
5927 
5928 	inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
5929 			     btrfs_init_locked_inode,
5930 			     (void *)&args);
5931 	if (!inode)
5932 		return NULL;
5933 	return BTRFS_I(inode);
5934 }
5935 
5936 /*
5937  * Get an inode object given its inode number and corresponding root.  Path is
5938  * preallocated to prevent recursing back to iget through allocator.
5939  */
btrfs_iget_path(u64 ino,struct btrfs_root * root,struct btrfs_path * path)5940 struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
5941 				    struct btrfs_path *path)
5942 {
5943 	struct btrfs_inode *inode;
5944 	int ret;
5945 
5946 	inode = btrfs_iget_locked(ino, root);
5947 	if (!inode)
5948 		return ERR_PTR(-ENOMEM);
5949 
5950 	if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
5951 		return inode;
5952 
5953 	ret = btrfs_read_locked_inode(inode, path);
5954 	if (ret)
5955 		return ERR_PTR(ret);
5956 
5957 	unlock_new_inode(&inode->vfs_inode);
5958 	return inode;
5959 }
5960 
5961 /*
5962  * Get an inode object given its inode number and corresponding root.
5963  */
btrfs_iget(u64 ino,struct btrfs_root * root)5964 struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
5965 {
5966 	struct btrfs_inode *inode;
5967 	struct btrfs_path *path;
5968 	int ret;
5969 
5970 	inode = btrfs_iget_locked(ino, root);
5971 	if (!inode)
5972 		return ERR_PTR(-ENOMEM);
5973 
5974 	if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
5975 		return inode;
5976 
5977 	path = btrfs_alloc_path();
5978 	if (!path) {
5979 		iget_failed(&inode->vfs_inode);
5980 		return ERR_PTR(-ENOMEM);
5981 	}
5982 
5983 	ret = btrfs_read_locked_inode(inode, path);
5984 	btrfs_free_path(path);
5985 	if (ret)
5986 		return ERR_PTR(ret);
5987 
5988 	if (S_ISDIR(inode->vfs_inode.i_mode))
5989 		inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC;
5990 	unlock_new_inode(&inode->vfs_inode);
5991 	return inode;
5992 }
5993 
new_simple_dir(struct inode * dir,struct btrfs_key * key,struct btrfs_root * root)5994 static struct btrfs_inode *new_simple_dir(struct inode *dir,
5995 					  struct btrfs_key *key,
5996 					  struct btrfs_root *root)
5997 {
5998 	struct timespec64 ts;
5999 	struct inode *vfs_inode;
6000 	struct btrfs_inode *inode;
6001 
6002 	vfs_inode = new_inode(dir->i_sb);
6003 	if (!vfs_inode)
6004 		return ERR_PTR(-ENOMEM);
6005 
6006 	inode = BTRFS_I(vfs_inode);
6007 	inode->root = btrfs_grab_root(root);
6008 	inode->ref_root_id = key->objectid;
6009 	set_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags);
6010 	set_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags);
6011 
6012 	btrfs_set_inode_number(inode, BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
6013 	/*
6014 	 * We only need lookup, the rest is read-only and there's no inode
6015 	 * associated with the dentry
6016 	 */
6017 	vfs_inode->i_op = &simple_dir_inode_operations;
6018 	vfs_inode->i_opflags &= ~IOP_XATTR;
6019 	vfs_inode->i_fop = &simple_dir_operations;
6020 	vfs_inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
6021 
6022 	ts = inode_set_ctime_current(vfs_inode);
6023 	inode_set_mtime_to_ts(vfs_inode, ts);
6024 	inode_set_atime_to_ts(vfs_inode, inode_get_atime(dir));
6025 	inode->i_otime_sec = ts.tv_sec;
6026 	inode->i_otime_nsec = ts.tv_nsec;
6027 
6028 	vfs_inode->i_uid = dir->i_uid;
6029 	vfs_inode->i_gid = dir->i_gid;
6030 
6031 	return inode;
6032 }
6033 
6034 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
6035 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
6036 static_assert(BTRFS_FT_DIR == FT_DIR);
6037 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
6038 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
6039 static_assert(BTRFS_FT_FIFO == FT_FIFO);
6040 static_assert(BTRFS_FT_SOCK == FT_SOCK);
6041 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
6042 
btrfs_inode_type(const struct btrfs_inode * inode)6043 static inline u8 btrfs_inode_type(const struct btrfs_inode *inode)
6044 {
6045 	return fs_umode_to_ftype(inode->vfs_inode.i_mode);
6046 }
6047 
btrfs_lookup_dentry(struct inode * dir,struct dentry * dentry)6048 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
6049 {
6050 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6051 	struct btrfs_inode *inode;
6052 	struct btrfs_root *root = BTRFS_I(dir)->root;
6053 	struct btrfs_root *sub_root = root;
6054 	struct btrfs_key location = { 0 };
6055 	u8 di_type = 0;
6056 	int ret = 0;
6057 
6058 	if (dentry->d_name.len > BTRFS_NAME_LEN)
6059 		return ERR_PTR(-ENAMETOOLONG);
6060 
6061 	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
6062 	if (ret < 0)
6063 		return ERR_PTR(ret);
6064 
6065 	if (location.type == BTRFS_INODE_ITEM_KEY) {
6066 		inode = btrfs_iget(location.objectid, root);
6067 		if (IS_ERR(inode))
6068 			return ERR_CAST(inode);
6069 
6070 		/* Do extra check against inode mode with di_type */
6071 		if (unlikely(btrfs_inode_type(inode) != di_type)) {
6072 			btrfs_crit(fs_info,
6073 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
6074 				  inode->vfs_inode.i_mode, btrfs_inode_type(inode),
6075 				  di_type);
6076 			iput(&inode->vfs_inode);
6077 			return ERR_PTR(-EUCLEAN);
6078 		}
6079 		return &inode->vfs_inode;
6080 	}
6081 
6082 	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
6083 				       &location, &sub_root);
6084 	if (ret < 0) {
6085 		if (ret != -ENOENT)
6086 			inode = ERR_PTR(ret);
6087 		else
6088 			inode = new_simple_dir(dir, &location, root);
6089 	} else {
6090 		inode = btrfs_iget(location.objectid, sub_root);
6091 		btrfs_put_root(sub_root);
6092 
6093 		if (IS_ERR(inode))
6094 			return ERR_CAST(inode);
6095 
6096 		down_read(&fs_info->cleanup_work_sem);
6097 		if (!sb_rdonly(inode->vfs_inode.i_sb))
6098 			ret = btrfs_orphan_cleanup(sub_root);
6099 		up_read(&fs_info->cleanup_work_sem);
6100 		if (ret) {
6101 			iput(&inode->vfs_inode);
6102 			inode = ERR_PTR(ret);
6103 		}
6104 	}
6105 
6106 	if (IS_ERR(inode))
6107 		return ERR_CAST(inode);
6108 
6109 	return &inode->vfs_inode;
6110 }
6111 
btrfs_dentry_delete(const struct dentry * dentry)6112 static int btrfs_dentry_delete(const struct dentry *dentry)
6113 {
6114 	struct btrfs_root *root;
6115 	struct inode *inode = d_inode(dentry);
6116 
6117 	if (!inode && !IS_ROOT(dentry))
6118 		inode = d_inode(dentry->d_parent);
6119 
6120 	if (inode) {
6121 		root = BTRFS_I(inode)->root;
6122 		if (btrfs_root_refs(&root->root_item) == 0)
6123 			return 1;
6124 
6125 		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6126 			return 1;
6127 	}
6128 	return 0;
6129 }
6130 
btrfs_lookup(struct inode * dir,struct dentry * dentry,unsigned int flags)6131 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
6132 				   unsigned int flags)
6133 {
6134 	struct inode *inode = btrfs_lookup_dentry(dir, dentry);
6135 
6136 	if (inode == ERR_PTR(-ENOENT))
6137 		inode = NULL;
6138 	return d_splice_alias(inode, dentry);
6139 }
6140 
6141 /*
6142  * Find the highest existing sequence number in a directory and then set the
6143  * in-memory index_cnt variable to the first free sequence number.
6144  */
btrfs_set_inode_index_count(struct btrfs_inode * inode)6145 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6146 {
6147 	struct btrfs_root *root = inode->root;
6148 	struct btrfs_key key, found_key;
6149 	BTRFS_PATH_AUTO_FREE(path);
6150 	struct extent_buffer *leaf;
6151 	int ret;
6152 
6153 	key.objectid = btrfs_ino(inode);
6154 	key.type = BTRFS_DIR_INDEX_KEY;
6155 	key.offset = (u64)-1;
6156 
6157 	path = btrfs_alloc_path();
6158 	if (!path)
6159 		return -ENOMEM;
6160 
6161 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6162 	if (ret < 0)
6163 		return ret;
6164 
6165 	if (unlikely(ret == 0)) {
6166 		/*
6167 		 * Key with offset -1 found, there would have to exist a dir
6168 		 * index item with such offset, but this is out of the valid
6169 		 * range.
6170 		 */
6171 		btrfs_err(root->fs_info,
6172 			  "unexpected exact match for DIR_INDEX key, inode %llu",
6173 			  btrfs_ino(inode));
6174 		return -EUCLEAN;
6175 	}
6176 
6177 	if (path->slots[0] == 0) {
6178 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6179 		return 0;
6180 	}
6181 
6182 	path->slots[0]--;
6183 
6184 	leaf = path->nodes[0];
6185 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6186 
6187 	if (found_key.objectid != btrfs_ino(inode) ||
6188 	    found_key.type != BTRFS_DIR_INDEX_KEY) {
6189 		inode->index_cnt = BTRFS_DIR_START_INDEX;
6190 		return 0;
6191 	}
6192 
6193 	inode->index_cnt = found_key.offset + 1;
6194 
6195 	return 0;
6196 }
6197 
btrfs_get_dir_last_index(struct btrfs_inode * dir,u64 * index)6198 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
6199 {
6200 	int ret = 0;
6201 
6202 	btrfs_inode_lock(dir, 0);
6203 	if (dir->index_cnt == (u64)-1) {
6204 		ret = btrfs_inode_delayed_dir_index_count(dir);
6205 		if (ret) {
6206 			ret = btrfs_set_inode_index_count(dir);
6207 			if (ret)
6208 				goto out;
6209 		}
6210 	}
6211 
6212 	/* index_cnt is the index number of next new entry, so decrement it. */
6213 	*index = dir->index_cnt - 1;
6214 out:
6215 	btrfs_inode_unlock(dir, 0);
6216 
6217 	return ret;
6218 }
6219 
6220 /*
6221  * All this infrastructure exists because dir_emit can fault, and we are holding
6222  * the tree lock when doing readdir.  For now just allocate a buffer and copy
6223  * our information into that, and then dir_emit from the buffer.  This is
6224  * similar to what NFS does, only we don't keep the buffer around in pagecache
6225  * because I'm afraid I'll mess that up.  Long term we need to make filldir do
6226  * copy_to_user_inatomic so we don't have to worry about page faulting under the
6227  * tree lock.
6228  */
btrfs_opendir(struct inode * inode,struct file * file)6229 static int btrfs_opendir(struct inode *inode, struct file *file)
6230 {
6231 	struct btrfs_file_private *private;
6232 	u64 last_index;
6233 	int ret;
6234 
6235 	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
6236 	if (ret)
6237 		return ret;
6238 
6239 	private = kzalloc_obj(struct btrfs_file_private);
6240 	if (!private)
6241 		return -ENOMEM;
6242 	private->last_index = last_index;
6243 	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
6244 	if (!private->filldir_buf) {
6245 		kfree(private);
6246 		return -ENOMEM;
6247 	}
6248 	file->private_data = private;
6249 	return 0;
6250 }
6251 
btrfs_dir_llseek(struct file * file,loff_t offset,int whence)6252 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
6253 {
6254 	struct btrfs_file_private *private = file->private_data;
6255 	int ret;
6256 
6257 	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
6258 				       &private->last_index);
6259 	if (ret)
6260 		return ret;
6261 
6262 	return generic_file_llseek(file, offset, whence);
6263 }
6264 
6265 struct dir_entry {
6266 	u64 ino;
6267 	u64 offset;
6268 	unsigned type;
6269 	int name_len;
6270 };
6271 
btrfs_filldir(void * addr,int entries,struct dir_context * ctx)6272 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
6273 {
6274 	while (entries--) {
6275 		struct dir_entry *entry = addr;
6276 		char *name = (char *)(entry + 1);
6277 
6278 		ctx->pos = get_unaligned(&entry->offset);
6279 		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
6280 					 get_unaligned(&entry->ino),
6281 					 get_unaligned(&entry->type)))
6282 			return 1;
6283 		addr += sizeof(struct dir_entry) +
6284 			get_unaligned(&entry->name_len);
6285 		ctx->pos++;
6286 	}
6287 	return 0;
6288 }
6289 
btrfs_real_readdir(struct file * file,struct dir_context * ctx)6290 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
6291 {
6292 	struct inode *inode = file_inode(file);
6293 	struct btrfs_root *root = BTRFS_I(inode)->root;
6294 	struct btrfs_file_private *private = file->private_data;
6295 	struct btrfs_dir_item *di;
6296 	struct btrfs_key key;
6297 	struct btrfs_key found_key;
6298 	BTRFS_PATH_AUTO_FREE(path);
6299 	void *addr;
6300 	LIST_HEAD(ins_list);
6301 	LIST_HEAD(del_list);
6302 	int ret;
6303 	char *name_ptr;
6304 	int name_len;
6305 	int entries = 0;
6306 	int total_len = 0;
6307 	bool put = false;
6308 	struct btrfs_key location;
6309 
6310 	if (!dir_emit_dots(file, ctx))
6311 		return 0;
6312 
6313 	path = btrfs_alloc_path();
6314 	if (!path)
6315 		return -ENOMEM;
6316 
6317 	addr = private->filldir_buf;
6318 	path->reada = READA_FORWARD;
6319 
6320 	put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
6321 					      &ins_list, &del_list);
6322 
6323 again:
6324 	key.type = BTRFS_DIR_INDEX_KEY;
6325 	key.offset = ctx->pos;
6326 	key.objectid = btrfs_ino(BTRFS_I(inode));
6327 
6328 	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
6329 		struct dir_entry *entry;
6330 		struct extent_buffer *leaf = path->nodes[0];
6331 		u8 ftype;
6332 
6333 		if (found_key.objectid != key.objectid)
6334 			break;
6335 		if (found_key.type != BTRFS_DIR_INDEX_KEY)
6336 			break;
6337 		if (found_key.offset < ctx->pos)
6338 			continue;
6339 		if (found_key.offset > private->last_index)
6340 			break;
6341 		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
6342 			continue;
6343 		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
6344 		name_len = btrfs_dir_name_len(leaf, di);
6345 		if ((total_len + sizeof(struct dir_entry) + name_len) >=
6346 		    PAGE_SIZE) {
6347 			btrfs_release_path(path);
6348 			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6349 			if (ret)
6350 				goto nopos;
6351 			addr = private->filldir_buf;
6352 			entries = 0;
6353 			total_len = 0;
6354 			goto again;
6355 		}
6356 
6357 		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
6358 		entry = addr;
6359 		name_ptr = (char *)(entry + 1);
6360 		read_extent_buffer(leaf, name_ptr,
6361 				   (unsigned long)(di + 1), name_len);
6362 		put_unaligned(name_len, &entry->name_len);
6363 		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
6364 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
6365 		put_unaligned(location.objectid, &entry->ino);
6366 		put_unaligned(found_key.offset, &entry->offset);
6367 		entries++;
6368 		addr += sizeof(struct dir_entry) + name_len;
6369 		total_len += sizeof(struct dir_entry) + name_len;
6370 	}
6371 	/* Catch error encountered during iteration */
6372 	if (ret < 0)
6373 		goto err;
6374 
6375 	btrfs_release_path(path);
6376 
6377 	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6378 	if (ret)
6379 		goto nopos;
6380 
6381 	if (btrfs_readdir_delayed_dir_index(ctx, &ins_list))
6382 		goto nopos;
6383 
6384 	/*
6385 	 * Stop new entries from being returned after we return the last
6386 	 * entry.
6387 	 *
6388 	 * New directory entries are assigned a strictly increasing
6389 	 * offset.  This means that new entries created during readdir
6390 	 * are *guaranteed* to be seen in the future by that readdir.
6391 	 * This has broken buggy programs which operate on names as
6392 	 * they're returned by readdir.  Until we reuse freed offsets
6393 	 * we have this hack to stop new entries from being returned
6394 	 * under the assumption that they'll never reach this huge
6395 	 * offset.
6396 	 *
6397 	 * This is being careful not to overflow 32bit loff_t unless the
6398 	 * last entry requires it because doing so has broken 32bit apps
6399 	 * in the past.
6400 	 */
6401 	if (ctx->pos >= INT_MAX)
6402 		ctx->pos = LLONG_MAX;
6403 	else
6404 		ctx->pos = INT_MAX;
6405 nopos:
6406 	ret = 0;
6407 err:
6408 	if (put)
6409 		btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
6410 	return ret;
6411 }
6412 
6413 /*
6414  * This is somewhat expensive, updating the tree every time the
6415  * inode changes.  But, it is most likely to find the inode in cache.
6416  * FIXME, needs more benchmarking...there are no reasons other than performance
6417  * to keep or drop this code.
6418  */
btrfs_dirty_inode(struct btrfs_inode * inode)6419 static int btrfs_dirty_inode(struct btrfs_inode *inode)
6420 {
6421 	struct btrfs_root *root = inode->root;
6422 	struct btrfs_fs_info *fs_info = root->fs_info;
6423 	struct btrfs_trans_handle *trans;
6424 	int ret;
6425 
6426 	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
6427 		return 0;
6428 
6429 	trans = btrfs_join_transaction(root);
6430 	if (IS_ERR(trans))
6431 		return PTR_ERR(trans);
6432 
6433 	ret = btrfs_update_inode(trans, inode);
6434 	if (ret == -ENOSPC || ret == -EDQUOT) {
6435 		/* whoops, lets try again with the full transaction */
6436 		btrfs_end_transaction(trans);
6437 		trans = btrfs_start_transaction(root, 1);
6438 		if (IS_ERR(trans))
6439 			return PTR_ERR(trans);
6440 
6441 		ret = btrfs_update_inode(trans, inode);
6442 	}
6443 	btrfs_end_transaction(trans);
6444 	if (inode->delayed_node)
6445 		btrfs_balance_delayed_items(fs_info);
6446 
6447 	return ret;
6448 }
6449 
6450 /*
6451  * We need our own ->update_time so that we can return error on ENOSPC for
6452  * updating the inode in the case of file write and mmap writes.
6453  */
btrfs_update_time(struct inode * inode,enum fs_update_time type,unsigned int flags)6454 static int btrfs_update_time(struct inode *inode, enum fs_update_time type,
6455 		unsigned int flags)
6456 {
6457 	struct btrfs_root *root = BTRFS_I(inode)->root;
6458 	int dirty;
6459 
6460 	if (btrfs_root_readonly(root))
6461 		return -EROFS;
6462 	if (flags & IOCB_NOWAIT)
6463 		return -EAGAIN;
6464 
6465 	dirty = inode_update_time(inode, type, flags);
6466 	if (dirty <= 0)
6467 		return dirty;
6468 	return btrfs_dirty_inode(BTRFS_I(inode));
6469 }
6470 
6471 /*
6472  * helper to find a free sequence number in a given directory.  This current
6473  * code is very simple, later versions will do smarter things in the btree
6474  */
btrfs_set_inode_index(struct btrfs_inode * dir,u64 * index)6475 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6476 {
6477 	int ret = 0;
6478 
6479 	if (dir->index_cnt == (u64)-1) {
6480 		ret = btrfs_inode_delayed_dir_index_count(dir);
6481 		if (ret) {
6482 			ret = btrfs_set_inode_index_count(dir);
6483 			if (ret)
6484 				return ret;
6485 		}
6486 	}
6487 
6488 	*index = dir->index_cnt;
6489 	dir->index_cnt++;
6490 
6491 	return ret;
6492 }
6493 
btrfs_insert_inode_locked(struct inode * inode)6494 static int btrfs_insert_inode_locked(struct inode *inode)
6495 {
6496 	struct btrfs_iget_args args;
6497 
6498 	args.ino = btrfs_ino(BTRFS_I(inode));
6499 	args.root = BTRFS_I(inode)->root;
6500 
6501 	return insert_inode_locked4(inode,
6502 		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6503 		   btrfs_find_actor, &args);
6504 }
6505 
btrfs_new_inode_prepare(struct btrfs_new_inode_args * args,unsigned int * trans_num_items)6506 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6507 			    unsigned int *trans_num_items)
6508 {
6509 	struct inode *dir = args->dir;
6510 	struct inode *inode = args->inode;
6511 	int ret;
6512 
6513 	if (!args->orphan) {
6514 		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
6515 					     &args->fname);
6516 		if (ret)
6517 			return ret;
6518 	}
6519 
6520 	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6521 	if (ret) {
6522 		fscrypt_free_filename(&args->fname);
6523 		return ret;
6524 	}
6525 
6526 	/* 1 to add inode item */
6527 	*trans_num_items = 1;
6528 	/* 1 to add compression property */
6529 	if (BTRFS_I(dir)->prop_compress)
6530 		(*trans_num_items)++;
6531 	/* 1 to add default ACL xattr */
6532 	if (args->default_acl)
6533 		(*trans_num_items)++;
6534 	/* 1 to add access ACL xattr */
6535 	if (args->acl)
6536 		(*trans_num_items)++;
6537 #ifdef CONFIG_SECURITY
6538 	/* 1 to add LSM xattr */
6539 	if (dir->i_security)
6540 		(*trans_num_items)++;
6541 #endif
6542 	if (args->orphan) {
6543 		/* 1 to add orphan item */
6544 		(*trans_num_items)++;
6545 	} else {
6546 		/*
6547 		 * 1 to add dir item
6548 		 * 1 to add dir index
6549 		 * 1 to update parent inode item
6550 		 *
6551 		 * No need for 1 unit for the inode ref item because it is
6552 		 * inserted in a batch together with the inode item at
6553 		 * btrfs_create_new_inode().
6554 		 */
6555 		*trans_num_items += 3;
6556 	}
6557 	return 0;
6558 }
6559 
btrfs_new_inode_args_destroy(struct btrfs_new_inode_args * args)6560 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6561 {
6562 	posix_acl_release(args->acl);
6563 	posix_acl_release(args->default_acl);
6564 	fscrypt_free_filename(&args->fname);
6565 }
6566 
6567 /*
6568  * Inherit flags from the parent inode.
6569  *
6570  * Currently only the compression flags and the cow flags are inherited.
6571  */
btrfs_inherit_iflags(struct btrfs_inode * inode,struct btrfs_inode * dir)6572 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
6573 {
6574 	unsigned int flags;
6575 
6576 	flags = dir->flags;
6577 
6578 	if (flags & BTRFS_INODE_NOCOMPRESS) {
6579 		inode->flags &= ~BTRFS_INODE_COMPRESS;
6580 		inode->flags |= BTRFS_INODE_NOCOMPRESS;
6581 	} else if (flags & BTRFS_INODE_COMPRESS) {
6582 		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
6583 		inode->flags |= BTRFS_INODE_COMPRESS;
6584 	}
6585 
6586 	if (flags & BTRFS_INODE_NODATACOW) {
6587 		inode->flags |= BTRFS_INODE_NODATACOW;
6588 		if (S_ISREG(inode->vfs_inode.i_mode))
6589 			inode->flags |= BTRFS_INODE_NODATASUM;
6590 	}
6591 
6592 	btrfs_sync_inode_flags_to_i_flags(inode);
6593 }
6594 
btrfs_create_new_inode(struct btrfs_trans_handle * trans,struct btrfs_new_inode_args * args)6595 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6596 			   struct btrfs_new_inode_args *args)
6597 {
6598 	struct timespec64 ts;
6599 	struct inode *dir = args->dir;
6600 	struct inode *inode = args->inode;
6601 	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6602 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6603 	struct btrfs_root *root;
6604 	struct btrfs_inode_item *inode_item;
6605 	struct btrfs_path *path;
6606 	u64 objectid;
6607 	struct btrfs_inode_ref *ref;
6608 	struct btrfs_key key[2];
6609 	u32 sizes[2];
6610 	struct btrfs_item_batch batch;
6611 	unsigned long ptr;
6612 	int ret;
6613 	bool xa_reserved = false;
6614 
6615 	path = btrfs_alloc_path();
6616 	if (!path)
6617 		return -ENOMEM;
6618 
6619 	if (!args->subvol)
6620 		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6621 	root = BTRFS_I(inode)->root;
6622 
6623 	ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
6624 	if (ret)
6625 		goto out;
6626 
6627 	ret = btrfs_get_free_objectid(root, &objectid);
6628 	if (ret)
6629 		goto out;
6630 	btrfs_set_inode_number(BTRFS_I(inode), objectid);
6631 
6632 	ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
6633 	if (ret)
6634 		goto out;
6635 	xa_reserved = true;
6636 
6637 	if (args->orphan) {
6638 		/*
6639 		 * O_TMPFILE, set link count to 0, so that after this point, we
6640 		 * fill in an inode item with the correct link count.
6641 		 */
6642 		set_nlink(inode, 0);
6643 	} else {
6644 		trace_btrfs_inode_request(dir);
6645 
6646 		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6647 		if (ret)
6648 			goto out;
6649 	}
6650 
6651 	if (S_ISDIR(inode->i_mode))
6652 		BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6653 
6654 	BTRFS_I(inode)->generation = trans->transid;
6655 	inode->i_generation = BTRFS_I(inode)->generation;
6656 
6657 	/*
6658 	 * We don't have any capability xattrs set here yet, shortcut any
6659 	 * queries for the xattrs here.  If we add them later via the inode
6660 	 * security init path or any other path this flag will be cleared.
6661 	 */
6662 	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
6663 
6664 	/*
6665 	 * Subvolumes don't inherit flags from their parent directory.
6666 	 * Originally this was probably by accident, but we probably can't
6667 	 * change it now without compatibility issues.
6668 	 */
6669 	if (!args->subvol)
6670 		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6671 
6672 	btrfs_set_inode_mapping_order(BTRFS_I(inode));
6673 	if (S_ISREG(inode->i_mode)) {
6674 		if (btrfs_test_opt(fs_info, NODATASUM))
6675 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6676 		if (btrfs_test_opt(fs_info, NODATACOW))
6677 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6678 				BTRFS_INODE_NODATASUM;
6679 		btrfs_update_inode_mapping_flags(BTRFS_I(inode));
6680 	}
6681 
6682 	ret = btrfs_insert_inode_locked(inode);
6683 	if (ret < 0) {
6684 		if (!args->orphan)
6685 			BTRFS_I(dir)->index_cnt--;
6686 		goto out;
6687 	}
6688 
6689 	/*
6690 	 * We could have gotten an inode number from somebody who was fsynced
6691 	 * and then removed in this same transaction, so let's just set full
6692 	 * sync since it will be a full sync anyway and this will blow away the
6693 	 * old info in the log.
6694 	 */
6695 	btrfs_set_inode_full_sync(BTRFS_I(inode));
6696 
6697 	key[0].objectid = objectid;
6698 	key[0].type = BTRFS_INODE_ITEM_KEY;
6699 	key[0].offset = 0;
6700 
6701 	sizes[0] = sizeof(struct btrfs_inode_item);
6702 
6703 	if (!args->orphan) {
6704 		/*
6705 		 * Start new inodes with an inode_ref. This is slightly more
6706 		 * efficient for small numbers of hard links since they will
6707 		 * be packed into one item. Extended refs will kick in if we
6708 		 * add more hard links than can fit in the ref item.
6709 		 */
6710 		key[1].objectid = objectid;
6711 		key[1].type = BTRFS_INODE_REF_KEY;
6712 		if (args->subvol) {
6713 			key[1].offset = objectid;
6714 			sizes[1] = 2 + sizeof(*ref);
6715 		} else {
6716 			key[1].offset = btrfs_ino(BTRFS_I(dir));
6717 			sizes[1] = name->len + sizeof(*ref);
6718 		}
6719 	}
6720 
6721 	batch.keys = &key[0];
6722 	batch.data_sizes = &sizes[0];
6723 	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6724 	batch.nr = args->orphan ? 1 : 2;
6725 	ret = btrfs_insert_empty_items(trans, root, path, &batch);
6726 	if (unlikely(ret != 0)) {
6727 		btrfs_abort_transaction(trans, ret);
6728 		goto discard;
6729 	}
6730 
6731 	ts = simple_inode_init_ts(inode);
6732 	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
6733 	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
6734 
6735 	/*
6736 	 * We're going to fill the inode item now, so at this point the inode
6737 	 * must be fully initialized.
6738 	 */
6739 
6740 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6741 				  struct btrfs_inode_item);
6742 	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6743 			     sizeof(*inode_item));
6744 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
6745 
6746 	if (!args->orphan) {
6747 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6748 				     struct btrfs_inode_ref);
6749 		ptr = (unsigned long)(ref + 1);
6750 		if (args->subvol) {
6751 			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6752 			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6753 			write_extent_buffer(path->nodes[0], "..", ptr, 2);
6754 		} else {
6755 			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6756 						     name->len);
6757 			btrfs_set_inode_ref_index(path->nodes[0], ref,
6758 						  BTRFS_I(inode)->dir_index);
6759 			write_extent_buffer(path->nodes[0], name->name, ptr,
6760 					    name->len);
6761 		}
6762 	}
6763 
6764 	/*
6765 	 * We don't need the path anymore, plus inheriting properties, adding
6766 	 * ACLs, security xattrs, orphan item or adding the link, will result in
6767 	 * allocating yet another path. So just free our path.
6768 	 */
6769 	btrfs_free_path(path);
6770 	path = NULL;
6771 
6772 	if (args->subvol) {
6773 		struct btrfs_inode *parent;
6774 
6775 		/*
6776 		 * Subvolumes inherit properties from their parent subvolume,
6777 		 * not the directory they were created in.
6778 		 */
6779 		parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
6780 		if (IS_ERR(parent)) {
6781 			ret = PTR_ERR(parent);
6782 		} else {
6783 			ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
6784 							parent);
6785 			iput(&parent->vfs_inode);
6786 		}
6787 	} else {
6788 		ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
6789 						BTRFS_I(dir));
6790 	}
6791 	if (ret) {
6792 		btrfs_err(fs_info,
6793 			  "error inheriting props for ino %llu (root %llu): %d",
6794 			  btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
6795 	}
6796 
6797 	/*
6798 	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6799 	 * probably a bug.
6800 	 */
6801 	if (!args->subvol) {
6802 		ret = btrfs_init_inode_security(trans, args);
6803 		if (unlikely(ret)) {
6804 			btrfs_abort_transaction(trans, ret);
6805 			goto discard;
6806 		}
6807 	}
6808 
6809 	ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
6810 	if (WARN_ON(ret)) {
6811 		/* Shouldn't happen, we used xa_reserve() before. */
6812 		btrfs_abort_transaction(trans, ret);
6813 		goto discard;
6814 	}
6815 
6816 	trace_btrfs_inode_new(inode);
6817 	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6818 
6819 	btrfs_update_root_times(trans, root);
6820 
6821 	if (args->orphan) {
6822 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6823 		if (unlikely(ret)) {
6824 			btrfs_abort_transaction(trans, ret);
6825 			goto discard;
6826 		}
6827 	} else {
6828 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6829 				     0, BTRFS_I(inode)->dir_index);
6830 		if (unlikely(ret)) {
6831 			btrfs_abort_transaction(trans, ret);
6832 			goto discard;
6833 		}
6834 	}
6835 
6836 	return 0;
6837 
6838 discard:
6839 	/*
6840 	 * discard_new_inode() calls iput(), but the caller owns the reference
6841 	 * to the inode.
6842 	 */
6843 	ihold(inode);
6844 	discard_new_inode(inode);
6845 out:
6846 	if (xa_reserved)
6847 		xa_release(&root->inodes, objectid);
6848 
6849 	btrfs_free_path(path);
6850 	return ret;
6851 }
6852 
6853 /*
6854  * utility function to add 'inode' into 'parent_inode' with
6855  * a give name and a given sequence number.
6856  * if 'add_backref' is true, also insert a backref from the
6857  * inode to the parent directory.
6858  */
btrfs_add_link(struct btrfs_trans_handle * trans,struct btrfs_inode * parent_inode,struct btrfs_inode * inode,const struct fscrypt_str * name,bool add_backref,u64 index)6859 int btrfs_add_link(struct btrfs_trans_handle *trans,
6860 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6861 		   const struct fscrypt_str *name, bool add_backref, u64 index)
6862 {
6863 	int ret = 0;
6864 	struct btrfs_key key;
6865 	struct btrfs_root *root = parent_inode->root;
6866 	u64 ino = btrfs_ino(inode);
6867 	u64 parent_ino = btrfs_ino(parent_inode);
6868 
6869 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6870 		memcpy(&key, &inode->root->root_key, sizeof(key));
6871 	} else {
6872 		key.objectid = ino;
6873 		key.type = BTRFS_INODE_ITEM_KEY;
6874 		key.offset = 0;
6875 	}
6876 
6877 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6878 		ret = btrfs_add_root_ref(trans, key.objectid,
6879 					 btrfs_root_id(root), parent_ino,
6880 					 index, name);
6881 	} else if (add_backref) {
6882 		ret = btrfs_insert_inode_ref(trans, root, name,
6883 					     ino, parent_ino, index);
6884 	}
6885 
6886 	/* Nothing to clean up yet */
6887 	if (ret)
6888 		return ret;
6889 
6890 	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
6891 				    btrfs_inode_type(inode), index);
6892 	if (ret == -EEXIST || ret == -EOVERFLOW)
6893 		goto fail_dir_item;
6894 	else if (unlikely(ret)) {
6895 		btrfs_abort_transaction(trans, ret);
6896 		return ret;
6897 	}
6898 
6899 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6900 			   name->len * 2);
6901 	inode_inc_iversion(&parent_inode->vfs_inode);
6902 	update_time_after_link_or_unlink(parent_inode);
6903 
6904 	ret = btrfs_update_inode(trans, parent_inode);
6905 	if (ret)
6906 		btrfs_abort_transaction(trans, ret);
6907 	return ret;
6908 
6909 fail_dir_item:
6910 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6911 		u64 local_index;
6912 		int ret2;
6913 
6914 		ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root),
6915 					  parent_ino, &local_index, name);
6916 		if (ret2)
6917 			btrfs_abort_transaction(trans, ret2);
6918 	} else if (add_backref) {
6919 		int ret2;
6920 
6921 		ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL);
6922 		if (ret2)
6923 			btrfs_abort_transaction(trans, ret2);
6924 	}
6925 
6926 	/* Return the original error code */
6927 	return ret;
6928 }
6929 
btrfs_create_common(struct inode * dir,struct dentry * dentry,struct inode * inode)6930 static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6931 			       struct inode *inode)
6932 {
6933 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6934 	struct btrfs_root *root = BTRFS_I(dir)->root;
6935 	struct btrfs_new_inode_args new_inode_args = {
6936 		.dir = dir,
6937 		.dentry = dentry,
6938 		.inode = inode,
6939 	};
6940 	unsigned int trans_num_items;
6941 	struct btrfs_trans_handle *trans;
6942 	int ret;
6943 
6944 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6945 	if (ret)
6946 		goto out_inode;
6947 
6948 	trans = btrfs_start_transaction(root, trans_num_items);
6949 	if (IS_ERR(trans)) {
6950 		ret = PTR_ERR(trans);
6951 		goto out_new_inode_args;
6952 	}
6953 
6954 	ret = btrfs_create_new_inode(trans, &new_inode_args);
6955 	if (!ret) {
6956 		if (S_ISDIR(inode->i_mode))
6957 			inode->i_opflags |= IOP_FASTPERM_MAY_EXEC;
6958 		d_instantiate_new(dentry, inode);
6959 	}
6960 
6961 	btrfs_end_transaction(trans);
6962 	btrfs_btree_balance_dirty(fs_info);
6963 out_new_inode_args:
6964 	btrfs_new_inode_args_destroy(&new_inode_args);
6965 out_inode:
6966 	if (ret)
6967 		iput(inode);
6968 	return ret;
6969 }
6970 
btrfs_mknod(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode,dev_t rdev)6971 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
6972 		       struct dentry *dentry, umode_t mode, dev_t rdev)
6973 {
6974 	struct inode *inode;
6975 
6976 	inode = new_inode(dir->i_sb);
6977 	if (!inode)
6978 		return -ENOMEM;
6979 	inode_init_owner(idmap, inode, dir, mode);
6980 	inode->i_op = &btrfs_special_inode_operations;
6981 	init_special_inode(inode, inode->i_mode, rdev);
6982 	return btrfs_create_common(dir, dentry, inode);
6983 }
6984 
btrfs_create(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode,bool excl)6985 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
6986 			struct dentry *dentry, umode_t mode, bool excl)
6987 {
6988 	struct inode *inode;
6989 
6990 	inode = new_inode(dir->i_sb);
6991 	if (!inode)
6992 		return -ENOMEM;
6993 	inode_init_owner(idmap, inode, dir, mode);
6994 	inode->i_fop = &btrfs_file_operations;
6995 	inode->i_op = &btrfs_file_inode_operations;
6996 	inode->i_mapping->a_ops = &btrfs_aops;
6997 	return btrfs_create_common(dir, dentry, inode);
6998 }
6999 
btrfs_link(struct dentry * old_dentry,struct inode * dir,struct dentry * dentry)7000 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
7001 		      struct dentry *dentry)
7002 {
7003 	struct btrfs_trans_handle *trans = NULL;
7004 	struct btrfs_root *root = BTRFS_I(dir)->root;
7005 	struct inode *inode = d_inode(old_dentry);
7006 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7007 	struct fscrypt_name fname;
7008 	u64 index;
7009 	int ret;
7010 
7011 	/* do not allow sys_link's with other subvols of the same device */
7012 	if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
7013 		return -EXDEV;
7014 
7015 	if (inode->i_nlink >= BTRFS_LINK_MAX)
7016 		return -EMLINK;
7017 
7018 	ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
7019 	if (ret)
7020 		goto fail;
7021 
7022 	ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
7023 	if (ret)
7024 		goto fail;
7025 
7026 	/*
7027 	 * 2 items for inode and inode ref
7028 	 * 2 items for dir items
7029 	 * 1 item for parent inode
7030 	 * 1 item for orphan item deletion if O_TMPFILE
7031 	 */
7032 	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
7033 	if (IS_ERR(trans)) {
7034 		ret = PTR_ERR(trans);
7035 		trans = NULL;
7036 		goto fail;
7037 	}
7038 
7039 	/* There are several dir indexes for this inode, clear the cache. */
7040 	BTRFS_I(inode)->dir_index = 0ULL;
7041 	inode_inc_iversion(inode);
7042 	inode_set_ctime_current(inode);
7043 
7044 	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
7045 			     &fname.disk_name, 1, index);
7046 	if (ret)
7047 		goto fail;
7048 
7049 	/* Link added now we update the inode item with the new link count. */
7050 	inc_nlink(inode);
7051 	ret = btrfs_update_inode(trans, BTRFS_I(inode));
7052 	if (unlikely(ret)) {
7053 		btrfs_abort_transaction(trans, ret);
7054 		goto fail;
7055 	}
7056 
7057 	if (inode->i_nlink == 1) {
7058 		/*
7059 		 * If the new hard link count is 1, it's a file created with the
7060 		 * open(2) O_TMPFILE flag.
7061 		 */
7062 		ret = btrfs_orphan_del(trans, BTRFS_I(inode));
7063 		if (unlikely(ret)) {
7064 			btrfs_abort_transaction(trans, ret);
7065 			goto fail;
7066 		}
7067 	}
7068 
7069 	/* Grab reference for the new dentry passed to d_instantiate(). */
7070 	ihold(inode);
7071 	d_instantiate(dentry, inode);
7072 	btrfs_log_new_name(trans, old_dentry, NULL, 0, dentry->d_parent);
7073 
7074 fail:
7075 	fscrypt_free_filename(&fname);
7076 	if (trans)
7077 		btrfs_end_transaction(trans);
7078 	btrfs_btree_balance_dirty(fs_info);
7079 	return ret;
7080 }
7081 
btrfs_mkdir(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode)7082 static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
7083 				  struct dentry *dentry, umode_t mode)
7084 {
7085 	struct inode *inode;
7086 
7087 	inode = new_inode(dir->i_sb);
7088 	if (!inode)
7089 		return ERR_PTR(-ENOMEM);
7090 	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
7091 	inode->i_op = &btrfs_dir_inode_operations;
7092 	inode->i_fop = &btrfs_dir_file_operations;
7093 	return ERR_PTR(btrfs_create_common(dir, dentry, inode));
7094 }
7095 
uncompress_inline(struct btrfs_path * path,struct folio * folio,struct btrfs_file_extent_item * item)7096 static noinline int uncompress_inline(struct btrfs_path *path,
7097 				      struct folio *folio,
7098 				      struct btrfs_file_extent_item *item)
7099 {
7100 	int ret;
7101 	struct extent_buffer *leaf = path->nodes[0];
7102 	const u32 blocksize = leaf->fs_info->sectorsize;
7103 	char *tmp;
7104 	size_t max_size;
7105 	unsigned long inline_size;
7106 	unsigned long ptr;
7107 	int compress_type;
7108 
7109 	compress_type = btrfs_file_extent_compression(leaf, item);
7110 	max_size = btrfs_file_extent_ram_bytes(leaf, item);
7111 	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
7112 	tmp = kmalloc(inline_size, GFP_NOFS);
7113 	if (!tmp)
7114 		return -ENOMEM;
7115 	ptr = btrfs_file_extent_inline_start(item);
7116 
7117 	read_extent_buffer(leaf, tmp, ptr, inline_size);
7118 
7119 	max_size = min_t(unsigned long, blocksize, max_size);
7120 	ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
7121 			       max_size);
7122 
7123 	/*
7124 	 * decompression code contains a memset to fill in any space between the end
7125 	 * of the uncompressed data and the end of max_size in case the decompressed
7126 	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
7127 	 * the end of an inline extent and the beginning of the next block, so we
7128 	 * cover that region here.
7129 	 */
7130 
7131 	if (max_size < blocksize)
7132 		folio_zero_range(folio, max_size, blocksize - max_size);
7133 	kfree(tmp);
7134 	return ret;
7135 }
7136 
read_inline_extent(struct btrfs_path * path,struct folio * folio)7137 static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
7138 {
7139 	const u32 blocksize = path->nodes[0]->fs_info->sectorsize;
7140 	struct btrfs_file_extent_item *fi;
7141 	void *kaddr;
7142 	size_t copy_size;
7143 
7144 	if (!folio || folio_test_uptodate(folio))
7145 		return 0;
7146 
7147 	ASSERT(folio_pos(folio) == 0);
7148 
7149 	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
7150 			    struct btrfs_file_extent_item);
7151 	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
7152 		return uncompress_inline(path, folio, fi);
7153 
7154 	copy_size = min_t(u64, blocksize,
7155 			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
7156 	kaddr = kmap_local_folio(folio, 0);
7157 	read_extent_buffer(path->nodes[0], kaddr,
7158 			   btrfs_file_extent_inline_start(fi), copy_size);
7159 	kunmap_local(kaddr);
7160 	if (copy_size < blocksize)
7161 		folio_zero_range(folio, copy_size, blocksize - copy_size);
7162 	return 0;
7163 }
7164 
7165 /*
7166  * Lookup the first extent overlapping a range in a file.
7167  *
7168  * @inode:	file to search in
7169  * @page:	page to read extent data into if the extent is inline
7170  * @start:	file offset
7171  * @len:	length of range starting at @start
7172  *
7173  * Return the first &struct extent_map which overlaps the given range, reading
7174  * it from the B-tree and caching it if necessary. Note that there may be more
7175  * extents which overlap the given range after the returned extent_map.
7176  *
7177  * If @page is not NULL and the extent is inline, this also reads the extent
7178  * data directly into the page and marks the extent up to date in the io_tree.
7179  *
7180  * Return: ERR_PTR on error, non-NULL extent_map on success.
7181  */
btrfs_get_extent(struct btrfs_inode * inode,struct folio * folio,u64 start,u64 len)7182 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
7183 				    struct folio *folio, u64 start, u64 len)
7184 {
7185 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
7186 	int ret = 0;
7187 	u64 extent_start = 0;
7188 	u64 extent_end = 0;
7189 	u64 objectid = btrfs_ino(inode);
7190 	int extent_type = -1;
7191 	struct btrfs_path *path = NULL;
7192 	struct btrfs_root *root = inode->root;
7193 	struct btrfs_file_extent_item *item;
7194 	struct extent_buffer *leaf;
7195 	struct btrfs_key found_key;
7196 	struct extent_map *em = NULL;
7197 	struct extent_map_tree *em_tree = &inode->extent_tree;
7198 
7199 	read_lock(&em_tree->lock);
7200 	em = btrfs_lookup_extent_mapping(em_tree, start, len);
7201 	read_unlock(&em_tree->lock);
7202 
7203 	if (em) {
7204 		if (em->start > start || btrfs_extent_map_end(em) <= start)
7205 			btrfs_free_extent_map(em);
7206 		else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
7207 			btrfs_free_extent_map(em);
7208 		else
7209 			goto out;
7210 	}
7211 	em = btrfs_alloc_extent_map();
7212 	if (!em) {
7213 		ret = -ENOMEM;
7214 		goto out;
7215 	}
7216 	em->start = EXTENT_MAP_HOLE;
7217 	em->disk_bytenr = EXTENT_MAP_HOLE;
7218 	em->len = (u64)-1;
7219 
7220 	path = btrfs_alloc_path();
7221 	if (!path) {
7222 		ret = -ENOMEM;
7223 		goto out;
7224 	}
7225 
7226 	/* Chances are we'll be called again, so go ahead and do readahead */
7227 	path->reada = READA_FORWARD;
7228 
7229 	/*
7230 	 * The same explanation in load_free_space_cache applies here as well,
7231 	 * we only read when we're loading the free space cache, and at that
7232 	 * point the commit_root has everything we need.
7233 	 */
7234 	if (btrfs_is_free_space_inode(inode)) {
7235 		path->search_commit_root = true;
7236 		path->skip_locking = true;
7237 	}
7238 
7239 	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
7240 	if (ret < 0) {
7241 		goto out;
7242 	} else if (ret > 0) {
7243 		if (path->slots[0] == 0)
7244 			goto not_found;
7245 		path->slots[0]--;
7246 		ret = 0;
7247 	}
7248 
7249 	leaf = path->nodes[0];
7250 	item = btrfs_item_ptr(leaf, path->slots[0],
7251 			      struct btrfs_file_extent_item);
7252 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7253 	if (found_key.objectid != objectid ||
7254 	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
7255 		/*
7256 		 * If we backup past the first extent we want to move forward
7257 		 * and see if there is an extent in front of us, otherwise we'll
7258 		 * say there is a hole for our whole search range which can
7259 		 * cause problems.
7260 		 */
7261 		extent_end = start;
7262 		goto next;
7263 	}
7264 
7265 	extent_type = btrfs_file_extent_type(leaf, item);
7266 	extent_start = found_key.offset;
7267 	extent_end = btrfs_file_extent_end(path);
7268 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
7269 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7270 		/* Only regular file could have regular/prealloc extent */
7271 		if (unlikely(!S_ISREG(inode->vfs_inode.i_mode))) {
7272 			ret = -EUCLEAN;
7273 			btrfs_crit(fs_info,
7274 		"regular/prealloc extent found for non-regular inode %llu",
7275 				   btrfs_ino(inode));
7276 			goto out;
7277 		}
7278 		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
7279 						       extent_start);
7280 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7281 		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
7282 						      path->slots[0],
7283 						      extent_start);
7284 	}
7285 next:
7286 	if (start >= extent_end) {
7287 		path->slots[0]++;
7288 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
7289 			ret = btrfs_next_leaf(root, path);
7290 			if (ret < 0)
7291 				goto out;
7292 			else if (ret > 0)
7293 				goto not_found;
7294 
7295 			leaf = path->nodes[0];
7296 		}
7297 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7298 		if (found_key.objectid != objectid ||
7299 		    found_key.type != BTRFS_EXTENT_DATA_KEY)
7300 			goto not_found;
7301 		if (start + len <= found_key.offset)
7302 			goto not_found;
7303 		if (start > found_key.offset)
7304 			goto next;
7305 
7306 		/* New extent overlaps with existing one */
7307 		em->start = start;
7308 		em->len = found_key.offset - start;
7309 		em->disk_bytenr = EXTENT_MAP_HOLE;
7310 		goto insert;
7311 	}
7312 
7313 	btrfs_extent_item_to_extent_map(inode, path, item, em);
7314 
7315 	if (extent_type == BTRFS_FILE_EXTENT_REG ||
7316 	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7317 		goto insert;
7318 	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7319 		/*
7320 		 * Inline extent can only exist at file offset 0. This is
7321 		 * ensured by tree-checker and inline extent creation path.
7322 		 * Thus all members representing file offsets should be zero.
7323 		 */
7324 		ASSERT(extent_start == 0);
7325 		ASSERT(em->start == 0);
7326 
7327 		/*
7328 		 * btrfs_extent_item_to_extent_map() should have properly
7329 		 * initialized em members already.
7330 		 *
7331 		 * Other members are not utilized for inline extents.
7332 		 */
7333 		ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
7334 		ASSERT(em->len == fs_info->sectorsize);
7335 
7336 		ret = read_inline_extent(path, folio);
7337 		if (ret < 0)
7338 			goto out;
7339 		goto insert;
7340 	}
7341 not_found:
7342 	em->start = start;
7343 	em->len = len;
7344 	em->disk_bytenr = EXTENT_MAP_HOLE;
7345 insert:
7346 	ret = 0;
7347 	btrfs_release_path(path);
7348 	if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) {
7349 		btrfs_err(fs_info,
7350 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
7351 			  em->start, em->len, start, len);
7352 		ret = -EIO;
7353 		goto out;
7354 	}
7355 
7356 	write_lock(&em_tree->lock);
7357 	ret = btrfs_add_extent_mapping(inode, &em, start, len);
7358 	write_unlock(&em_tree->lock);
7359 out:
7360 	btrfs_free_path(path);
7361 
7362 	trace_btrfs_get_extent(root, inode, em);
7363 
7364 	if (ret) {
7365 		btrfs_free_extent_map(em);
7366 		return ERR_PTR(ret);
7367 	}
7368 	return em;
7369 }
7370 
btrfs_extent_readonly(struct btrfs_fs_info * fs_info,u64 bytenr)7371 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7372 {
7373 	struct btrfs_block_group *block_group;
7374 	bool readonly = false;
7375 
7376 	block_group = btrfs_lookup_block_group(fs_info, bytenr);
7377 	if (!block_group || block_group->ro)
7378 		readonly = true;
7379 	if (block_group)
7380 		btrfs_put_block_group(block_group);
7381 	return readonly;
7382 }
7383 
7384 /*
7385  * Check if we can do nocow write into the range [@offset, @offset + @len)
7386  *
7387  * @offset:	File offset
7388  * @len:	The length to write, will be updated to the nocow writeable
7389  *		range
7390  * @orig_start:	(optional) Return the original file offset of the file extent
7391  * @orig_len:	(optional) Return the original on-disk length of the file extent
7392  * @ram_bytes:	(optional) Return the ram_bytes of the file extent
7393  *
7394  * Return:
7395  * >0	and update @len if we can do nocow write
7396  *  0	if we can't do nocow write
7397  * <0	if error happened
7398  *
7399  * NOTE: This only checks the file extents, caller is responsible to wait for
7400  *	 any ordered extents.
7401  */
can_nocow_extent(struct btrfs_inode * inode,u64 offset,u64 * len,struct btrfs_file_extent * file_extent,bool nowait)7402 noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
7403 			      struct btrfs_file_extent *file_extent,
7404 			      bool nowait)
7405 {
7406 	struct btrfs_root *root = inode->root;
7407 	struct btrfs_fs_info *fs_info = root->fs_info;
7408 	struct can_nocow_file_extent_args nocow_args = { 0 };
7409 	BTRFS_PATH_AUTO_FREE(path);
7410 	int ret;
7411 	struct extent_buffer *leaf;
7412 	struct extent_io_tree *io_tree = &inode->io_tree;
7413 	struct btrfs_file_extent_item *fi;
7414 	struct btrfs_key key;
7415 	int found_type;
7416 
7417 	path = btrfs_alloc_path();
7418 	if (!path)
7419 		return -ENOMEM;
7420 	path->nowait = nowait;
7421 
7422 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7423 				       offset, 0);
7424 	if (ret < 0)
7425 		return ret;
7426 
7427 	if (ret == 1) {
7428 		if (path->slots[0] == 0) {
7429 			/* Can't find the item, must COW. */
7430 			return 0;
7431 		}
7432 		path->slots[0]--;
7433 	}
7434 	ret = 0;
7435 	leaf = path->nodes[0];
7436 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7437 	if (key.objectid != btrfs_ino(inode) ||
7438 	    key.type != BTRFS_EXTENT_DATA_KEY) {
7439 		/* Not our file or wrong item type, must COW. */
7440 		return 0;
7441 	}
7442 
7443 	if (key.offset > offset) {
7444 		/* Wrong offset, must COW. */
7445 		return 0;
7446 	}
7447 
7448 	if (btrfs_file_extent_end(path) <= offset)
7449 		return 0;
7450 
7451 	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7452 	found_type = btrfs_file_extent_type(leaf, fi);
7453 
7454 	nocow_args.start = offset;
7455 	nocow_args.end = offset + *len - 1;
7456 	nocow_args.free_path = true;
7457 
7458 	ret = can_nocow_file_extent(path, &key, inode, &nocow_args);
7459 	/* can_nocow_file_extent() has freed the path. */
7460 	path = NULL;
7461 
7462 	if (ret != 1) {
7463 		/* Treat errors as not being able to NOCOW. */
7464 		return 0;
7465 	}
7466 
7467 	if (btrfs_extent_readonly(fs_info,
7468 				  nocow_args.file_extent.disk_bytenr +
7469 				  nocow_args.file_extent.offset))
7470 		return 0;
7471 
7472 	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
7473 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7474 		u64 range_end;
7475 
7476 		range_end = round_up(offset + nocow_args.file_extent.num_bytes,
7477 				     root->fs_info->sectorsize) - 1;
7478 		ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
7479 						  EXTENT_DELALLOC);
7480 		if (ret)
7481 			return -EAGAIN;
7482 	}
7483 
7484 	if (file_extent)
7485 		memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
7486 
7487 	*len = nocow_args.file_extent.num_bytes;
7488 
7489 	return 1;
7490 }
7491 
7492 /* The callers of this must take lock_extent() */
btrfs_create_io_em(struct btrfs_inode * inode,u64 start,const struct btrfs_file_extent * file_extent,int type)7493 struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
7494 				      const struct btrfs_file_extent *file_extent,
7495 				      int type)
7496 {
7497 	struct extent_map *em;
7498 	int ret;
7499 
7500 	/*
7501 	 * Note the missing NOCOW type.
7502 	 *
7503 	 * For pure NOCOW writes, we should not create an io extent map, but
7504 	 * just reusing the existing one.
7505 	 * Only PREALLOC writes (NOCOW write into preallocated range) can
7506 	 * create an io extent map.
7507 	 */
7508 	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7509 	       type == BTRFS_ORDERED_COMPRESSED ||
7510 	       type == BTRFS_ORDERED_REGULAR);
7511 
7512 	switch (type) {
7513 	case BTRFS_ORDERED_PREALLOC:
7514 		/* We're only referring part of a larger preallocated extent. */
7515 		ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7516 		break;
7517 	case BTRFS_ORDERED_REGULAR:
7518 		/* COW results a new extent matching our file extent size. */
7519 		ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
7520 		ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
7521 
7522 		/* Since it's a new extent, we should not have any offset. */
7523 		ASSERT(file_extent->offset == 0);
7524 		break;
7525 	case BTRFS_ORDERED_COMPRESSED:
7526 		/* Must be compressed. */
7527 		ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
7528 
7529 		/*
7530 		 * Encoded write can make us to refer to part of the
7531 		 * uncompressed extent.
7532 		 */
7533 		ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7534 		break;
7535 	}
7536 
7537 	em = btrfs_alloc_extent_map();
7538 	if (!em)
7539 		return ERR_PTR(-ENOMEM);
7540 
7541 	em->start = start;
7542 	em->len = file_extent->num_bytes;
7543 	em->disk_bytenr = file_extent->disk_bytenr;
7544 	em->disk_num_bytes = file_extent->disk_num_bytes;
7545 	em->ram_bytes = file_extent->ram_bytes;
7546 	em->generation = -1;
7547 	em->offset = file_extent->offset;
7548 	em->flags |= EXTENT_FLAG_PINNED;
7549 	if (type == BTRFS_ORDERED_COMPRESSED)
7550 		btrfs_extent_map_set_compression(em, file_extent->compression);
7551 
7552 	ret = btrfs_replace_extent_map_range(inode, em, true);
7553 	if (ret) {
7554 		btrfs_free_extent_map(em);
7555 		return ERR_PTR(ret);
7556 	}
7557 
7558 	/* em got 2 refs now, callers needs to do btrfs_free_extent_map once. */
7559 	return em;
7560 }
7561 
7562 /*
7563  * For release_folio() and invalidate_folio() we have a race window where
7564  * folio_end_writeback() is called but the subpage spinlock is not yet released.
7565  * If we continue to release/invalidate the page, we could cause use-after-free
7566  * for subpage spinlock.  So this function is to spin and wait for subpage
7567  * spinlock.
7568  */
wait_subpage_spinlock(struct folio * folio)7569 static void wait_subpage_spinlock(struct folio *folio)
7570 {
7571 	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
7572 	struct btrfs_folio_state *bfs;
7573 
7574 	if (!btrfs_is_subpage(fs_info, folio))
7575 		return;
7576 
7577 	ASSERT(folio_test_private(folio) && folio_get_private(folio));
7578 	bfs = folio_get_private(folio);
7579 
7580 	/*
7581 	 * This may look insane as we just acquire the spinlock and release it,
7582 	 * without doing anything.  But we just want to make sure no one is
7583 	 * still holding the subpage spinlock.
7584 	 * And since the page is not dirty nor writeback, and we have page
7585 	 * locked, the only possible way to hold a spinlock is from the endio
7586 	 * function to clear page writeback.
7587 	 *
7588 	 * Here we just acquire the spinlock so that all existing callers
7589 	 * should exit and we're safe to release/invalidate the page.
7590 	 */
7591 	spin_lock_irq(&bfs->lock);
7592 	spin_unlock_irq(&bfs->lock);
7593 }
7594 
btrfs_launder_folio(struct folio * folio)7595 static int btrfs_launder_folio(struct folio *folio)
7596 {
7597 	return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
7598 				      folio_size(folio), NULL);
7599 }
7600 
__btrfs_release_folio(struct folio * folio,gfp_t gfp_flags)7601 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7602 {
7603 	if (try_release_extent_mapping(folio, gfp_flags)) {
7604 		wait_subpage_spinlock(folio);
7605 		clear_folio_extent_mapped(folio);
7606 		return true;
7607 	}
7608 	return false;
7609 }
7610 
btrfs_release_folio(struct folio * folio,gfp_t gfp_flags)7611 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7612 {
7613 	if (folio_test_writeback(folio) || folio_test_dirty(folio))
7614 		return false;
7615 	return __btrfs_release_folio(folio, gfp_flags);
7616 }
7617 
7618 #ifdef CONFIG_MIGRATION
btrfs_migrate_folio(struct address_space * mapping,struct folio * dst,struct folio * src,enum migrate_mode mode)7619 static int btrfs_migrate_folio(struct address_space *mapping,
7620 			     struct folio *dst, struct folio *src,
7621 			     enum migrate_mode mode)
7622 {
7623 	int ret = filemap_migrate_folio(mapping, dst, src, mode);
7624 
7625 	if (ret)
7626 		return ret;
7627 
7628 	if (folio_test_ordered(src)) {
7629 		folio_clear_ordered(src);
7630 		folio_set_ordered(dst);
7631 	}
7632 
7633 	return 0;
7634 }
7635 #else
7636 #define btrfs_migrate_folio NULL
7637 #endif
7638 
btrfs_invalidate_folio(struct folio * folio,size_t offset,size_t length)7639 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
7640 				 size_t length)
7641 {
7642 	struct btrfs_inode *inode = folio_to_inode(folio);
7643 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
7644 	struct extent_io_tree *tree = &inode->io_tree;
7645 	struct extent_state *cached_state = NULL;
7646 	u64 page_start = folio_pos(folio);
7647 	u64 page_end = page_start + folio_size(folio) - 1;
7648 	u64 cur;
7649 	int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING;
7650 
7651 	/*
7652 	 * We have folio locked so no new ordered extent can be created on this
7653 	 * page, nor bio can be submitted for this folio.
7654 	 *
7655 	 * But already submitted bio can still be finished on this folio.
7656 	 * Furthermore, endio function won't skip folio which has Ordered
7657 	 * already cleared, so it's possible for endio and
7658 	 * invalidate_folio to do the same ordered extent accounting twice
7659 	 * on one folio.
7660 	 *
7661 	 * So here we wait for any submitted bios to finish, so that we won't
7662 	 * do double ordered extent accounting on the same folio.
7663 	 */
7664 	folio_wait_writeback(folio);
7665 	wait_subpage_spinlock(folio);
7666 
7667 	/*
7668 	 * For subpage case, we have call sites like
7669 	 * btrfs_punch_hole_lock_range() which passes range not aligned to
7670 	 * sectorsize.
7671 	 * If the range doesn't cover the full folio, we don't need to and
7672 	 * shouldn't clear page extent mapped, as folio->private can still
7673 	 * record subpage dirty bits for other part of the range.
7674 	 *
7675 	 * For cases that invalidate the full folio even the range doesn't
7676 	 * cover the full folio, like invalidating the last folio, we're
7677 	 * still safe to wait for ordered extent to finish.
7678 	 */
7679 	if (!(offset == 0 && length == folio_size(folio))) {
7680 		btrfs_release_folio(folio, GFP_NOFS);
7681 		return;
7682 	}
7683 
7684 	if (!inode_evicting)
7685 		btrfs_lock_extent(tree, page_start, page_end, &cached_state);
7686 
7687 	cur = page_start;
7688 	while (cur < page_end) {
7689 		struct btrfs_ordered_extent *ordered;
7690 		u64 range_end;
7691 		u32 range_len;
7692 		u32 extra_flags = 0;
7693 
7694 		ordered = btrfs_lookup_first_ordered_range(inode, cur,
7695 							   page_end + 1 - cur);
7696 		if (!ordered) {
7697 			range_end = page_end;
7698 			/*
7699 			 * No ordered extent covering this range, we are safe
7700 			 * to delete all extent states in the range.
7701 			 */
7702 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7703 			goto next;
7704 		}
7705 		if (ordered->file_offset > cur) {
7706 			/*
7707 			 * There is a range between [cur, oe->file_offset) not
7708 			 * covered by any ordered extent.
7709 			 * We are safe to delete all extent states, and handle
7710 			 * the ordered extent in the next iteration.
7711 			 */
7712 			range_end = ordered->file_offset - 1;
7713 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7714 			goto next;
7715 		}
7716 
7717 		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
7718 				page_end);
7719 		ASSERT(range_end + 1 - cur < U32_MAX);
7720 		range_len = range_end + 1 - cur;
7721 		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
7722 			/*
7723 			 * If Ordered is cleared, it means endio has
7724 			 * already been executed for the range.
7725 			 * We can't delete the extent states as
7726 			 * btrfs_finish_ordered_io() may still use some of them.
7727 			 */
7728 			goto next;
7729 		}
7730 		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
7731 
7732 		/*
7733 		 * IO on this page will never be started, so we need to account
7734 		 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
7735 		 * here, must leave that up for the ordered extent completion.
7736 		 *
7737 		 * This will also unlock the range for incoming
7738 		 * btrfs_finish_ordered_io().
7739 		 */
7740 		if (!inode_evicting)
7741 			btrfs_clear_extent_bit(tree, cur, range_end,
7742 					       EXTENT_DELALLOC |
7743 					       EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7744 					       EXTENT_DEFRAG, &cached_state);
7745 
7746 		spin_lock(&inode->ordered_tree_lock);
7747 		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
7748 		ordered->truncated_len = min(ordered->truncated_len,
7749 					     cur - ordered->file_offset);
7750 		spin_unlock(&inode->ordered_tree_lock);
7751 
7752 		/*
7753 		 * If the ordered extent has finished, we're safe to delete all
7754 		 * the extent states of the range, otherwise
7755 		 * btrfs_finish_ordered_io() will get executed by endio for
7756 		 * other pages, so we can't delete extent states.
7757 		 */
7758 		if (btrfs_dec_test_ordered_pending(inode, &ordered,
7759 						   cur, range_end + 1 - cur)) {
7760 			btrfs_finish_ordered_io(ordered);
7761 			/*
7762 			 * The ordered extent has finished, now we're again
7763 			 * safe to delete all extent states of the range.
7764 			 */
7765 			extra_flags = EXTENT_CLEAR_ALL_BITS;
7766 		}
7767 next:
7768 		if (ordered)
7769 			btrfs_put_ordered_extent(ordered);
7770 		/*
7771 		 * Qgroup reserved space handler
7772 		 * Sector(s) here will be either:
7773 		 *
7774 		 * 1) Already written to disk or bio already finished
7775 		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
7776 		 *    Qgroup will be handled by its qgroup_record then.
7777 		 *    btrfs_qgroup_free_data() call will do nothing here.
7778 		 *
7779 		 * 2) Not written to disk yet
7780 		 *    Then btrfs_qgroup_free_data() call will clear the
7781 		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
7782 		 *    reserved data space.
7783 		 *    Since the IO will never happen for this page.
7784 		 */
7785 		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
7786 		if (!inode_evicting)
7787 			btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
7788 					       EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
7789 					       EXTENT_DEFRAG | extra_flags,
7790 					       &cached_state);
7791 		cur = range_end + 1;
7792 	}
7793 	/*
7794 	 * We have iterated through all ordered extents of the page, the page
7795 	 * should not have Ordered anymore, or the above iteration
7796 	 * did something wrong.
7797 	 */
7798 	ASSERT(!folio_test_ordered(folio));
7799 	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
7800 	if (!inode_evicting)
7801 		__btrfs_release_folio(folio, GFP_NOFS);
7802 	clear_folio_extent_mapped(folio);
7803 }
7804 
btrfs_truncate(struct btrfs_inode * inode,bool skip_writeback)7805 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
7806 {
7807 	struct btrfs_truncate_control control = {
7808 		.inode = inode,
7809 		.ino = btrfs_ino(inode),
7810 		.min_type = BTRFS_EXTENT_DATA_KEY,
7811 		.clear_extent_range = true,
7812 		.new_size = inode->vfs_inode.i_size,
7813 	};
7814 	struct btrfs_root *root = inode->root;
7815 	struct btrfs_fs_info *fs_info = root->fs_info;
7816 	struct btrfs_block_rsv rsv;
7817 	int ret;
7818 	struct btrfs_trans_handle *trans;
7819 	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
7820 	const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize);
7821 	const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
7822 
7823 	/* Our inode is locked and the i_size can't be changed concurrently. */
7824 	btrfs_assert_inode_locked(inode);
7825 
7826 	if (!skip_writeback) {
7827 		ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1);
7828 		if (ret)
7829 			return ret;
7830 	}
7831 
7832 	/*
7833 	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
7834 	 * things going on here:
7835 	 *
7836 	 * 1) We need to reserve space to update our inode.
7837 	 *
7838 	 * 2) We need to have something to cache all the space that is going to
7839 	 * be free'd up by the truncate operation, but also have some slack
7840 	 * space reserved in case it uses space during the truncate (thank you
7841 	 * very much snapshotting).
7842 	 *
7843 	 * And we need these to be separate.  The fact is we can use a lot of
7844 	 * space doing the truncate, and we have no earthly idea how much space
7845 	 * we will use, so we need the truncate reservation to be separate so it
7846 	 * doesn't end up using space reserved for updating the inode.  We also
7847 	 * need to be able to stop the transaction and start a new one, which
7848 	 * means we need to be able to update the inode several times, and we
7849 	 * have no idea of knowing how many times that will be, so we can't just
7850 	 * reserve 1 item for the entirety of the operation, so that has to be
7851 	 * done separately as well.
7852 	 *
7853 	 * So that leaves us with
7854 	 *
7855 	 * 1) rsv - for the truncate reservation, which we will steal from the
7856 	 * transaction reservation.
7857 	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
7858 	 * updating the inode.
7859 	 */
7860 	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
7861 	rsv.size = min_size;
7862 	rsv.failfast = true;
7863 
7864 	/*
7865 	 * 1 for the truncate slack space
7866 	 * 1 for updating the inode.
7867 	 */
7868 	trans = btrfs_start_transaction(root, 2);
7869 	if (IS_ERR(trans)) {
7870 		ret = PTR_ERR(trans);
7871 		goto out;
7872 	}
7873 
7874 	/* Migrate the slack space for the truncate to our reserve */
7875 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
7876 				      min_size, false);
7877 	/*
7878 	 * We have reserved 2 metadata units when we started the transaction and
7879 	 * min_size matches 1 unit, so this should never fail, but if it does,
7880 	 * it's not critical we just fail truncation.
7881 	 */
7882 	if (WARN_ON(ret)) {
7883 		btrfs_end_transaction(trans);
7884 		goto out;
7885 	}
7886 
7887 	trans->block_rsv = &rsv;
7888 
7889 	while (1) {
7890 		struct extent_state *cached_state = NULL;
7891 
7892 		btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7893 		/*
7894 		 * We want to drop from the next block forward in case this new
7895 		 * size is not block aligned since we will be keeping the last
7896 		 * block of the extent just the way it is.
7897 		 */
7898 		btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
7899 
7900 		ret = btrfs_truncate_inode_items(trans, root, &control);
7901 
7902 		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
7903 		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
7904 
7905 		btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7906 
7907 		trans->block_rsv = &fs_info->trans_block_rsv;
7908 		if (ret != -ENOSPC && ret != -EAGAIN)
7909 			break;
7910 
7911 		ret = btrfs_update_inode(trans, inode);
7912 		if (ret)
7913 			break;
7914 
7915 		btrfs_end_transaction(trans);
7916 		btrfs_btree_balance_dirty(fs_info);
7917 
7918 		trans = btrfs_start_transaction(root, 2);
7919 		if (IS_ERR(trans)) {
7920 			ret = PTR_ERR(trans);
7921 			trans = NULL;
7922 			break;
7923 		}
7924 
7925 		btrfs_block_rsv_release(fs_info, &rsv, -1, NULL);
7926 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
7927 					      &rsv, min_size, false);
7928 		/*
7929 		 * We have reserved 2 metadata units when we started the
7930 		 * transaction and min_size matches 1 unit, so this should never
7931 		 * fail, but if it does, it's not critical we just fail truncation.
7932 		 */
7933 		if (WARN_ON(ret))
7934 			break;
7935 
7936 		trans->block_rsv = &rsv;
7937 	}
7938 
7939 	/*
7940 	 * We can't call btrfs_truncate_block inside a trans handle as we could
7941 	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
7942 	 * know we've truncated everything except the last little bit, and can
7943 	 * do btrfs_truncate_block and then update the disk_i_size.
7944 	 */
7945 	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
7946 		btrfs_end_transaction(trans);
7947 		btrfs_btree_balance_dirty(fs_info);
7948 
7949 		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size,
7950 					   inode->vfs_inode.i_size, (u64)-1);
7951 		if (ret)
7952 			goto out;
7953 		trans = btrfs_start_transaction(root, 1);
7954 		if (IS_ERR(trans)) {
7955 			ret = PTR_ERR(trans);
7956 			goto out;
7957 		}
7958 		btrfs_inode_safe_disk_i_size_write(inode, 0);
7959 	}
7960 
7961 	if (trans) {
7962 		int ret2;
7963 
7964 		trans->block_rsv = &fs_info->trans_block_rsv;
7965 		ret2 = btrfs_update_inode(trans, inode);
7966 		if (ret2 && !ret)
7967 			ret = ret2;
7968 
7969 		ret2 = btrfs_end_transaction(trans);
7970 		if (ret2 && !ret)
7971 			ret = ret2;
7972 		btrfs_btree_balance_dirty(fs_info);
7973 	}
7974 out:
7975 	btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
7976 	/*
7977 	 * So if we truncate and then write and fsync we normally would just
7978 	 * write the extents that changed, which is a problem if we need to
7979 	 * first truncate that entire inode.  So set this flag so we write out
7980 	 * all of the extents in the inode to the sync log so we're completely
7981 	 * safe.
7982 	 *
7983 	 * If no extents were dropped or trimmed we don't need to force the next
7984 	 * fsync to truncate all the inode's items from the log and re-log them
7985 	 * all. This means the truncate operation did not change the file size,
7986 	 * or changed it to a smaller size but there was only an implicit hole
7987 	 * between the old i_size and the new i_size, and there were no prealloc
7988 	 * extents beyond i_size to drop.
7989 	 */
7990 	if (control.extents_found > 0)
7991 		btrfs_set_inode_full_sync(inode);
7992 
7993 	return ret;
7994 }
7995 
btrfs_new_subvol_inode(struct mnt_idmap * idmap,struct inode * dir)7996 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
7997 				     struct inode *dir)
7998 {
7999 	struct inode *inode;
8000 
8001 	inode = new_inode(dir->i_sb);
8002 	if (inode) {
8003 		/*
8004 		 * Subvolumes don't inherit the sgid bit or the parent's gid if
8005 		 * the parent's sgid bit is set. This is probably a bug.
8006 		 */
8007 		inode_init_owner(idmap, inode, NULL,
8008 				 S_IFDIR | (~current_umask() & S_IRWXUGO));
8009 		inode->i_op = &btrfs_dir_inode_operations;
8010 		inode->i_fop = &btrfs_dir_file_operations;
8011 	}
8012 	return inode;
8013 }
8014 
btrfs_alloc_inode(struct super_block * sb)8015 struct inode *btrfs_alloc_inode(struct super_block *sb)
8016 {
8017 	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8018 	struct btrfs_inode *ei;
8019 	struct inode *inode;
8020 
8021 	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8022 	if (!ei)
8023 		return NULL;
8024 
8025 	ei->root = NULL;
8026 	ei->generation = 0;
8027 	ei->last_trans = 0;
8028 	ei->last_sub_trans = 0;
8029 	ei->logged_trans = 0;
8030 	ei->delalloc_bytes = 0;
8031 	/* new_delalloc_bytes and last_dir_index_offset are in a union. */
8032 	ei->new_delalloc_bytes = 0;
8033 	ei->defrag_bytes = 0;
8034 	ei->disk_i_size = 0;
8035 	ei->flags = 0;
8036 	ei->ro_flags = 0;
8037 	/*
8038 	 * ->index_cnt will be properly initialized later when creating a new
8039 	 * inode (btrfs_create_new_inode()) or when reading an existing inode
8040 	 * from disk (btrfs_read_locked_inode()).
8041 	 */
8042 	ei->csum_bytes = 0;
8043 	ei->dir_index = 0;
8044 	ei->last_unlink_trans = 0;
8045 	ei->last_reflink_trans = 0;
8046 	ei->last_log_commit = 0;
8047 
8048 	spin_lock_init(&ei->lock);
8049 	ei->outstanding_extents = 0;
8050 	if (sb->s_magic != BTRFS_TEST_MAGIC)
8051 		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8052 					      BTRFS_BLOCK_RSV_DELALLOC);
8053 	ei->runtime_flags = 0;
8054 	ei->prop_compress = BTRFS_COMPRESS_NONE;
8055 	ei->defrag_compress = BTRFS_COMPRESS_NONE;
8056 
8057 	ei->delayed_node = NULL;
8058 
8059 	ei->i_otime_sec = 0;
8060 	ei->i_otime_nsec = 0;
8061 
8062 	inode = &ei->vfs_inode;
8063 	btrfs_extent_map_tree_init(&ei->extent_tree);
8064 
8065 	/* This io tree sets the valid inode. */
8066 	btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
8067 	ei->io_tree.inode = ei;
8068 
8069 	ei->file_extent_tree = NULL;
8070 
8071 	mutex_init(&ei->log_mutex);
8072 	spin_lock_init(&ei->ordered_tree_lock);
8073 	ei->ordered_tree = RB_ROOT;
8074 	ei->ordered_tree_last = NULL;
8075 	INIT_LIST_HEAD(&ei->delalloc_inodes);
8076 	INIT_LIST_HEAD(&ei->delayed_iput);
8077 	init_rwsem(&ei->i_mmap_lock);
8078 
8079 	return inode;
8080 }
8081 
8082 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
btrfs_test_destroy_inode(struct inode * inode)8083 void btrfs_test_destroy_inode(struct inode *inode)
8084 {
8085 	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8086 	kfree(BTRFS_I(inode)->file_extent_tree);
8087 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8088 }
8089 #endif
8090 
btrfs_free_inode(struct inode * inode)8091 void btrfs_free_inode(struct inode *inode)
8092 {
8093 	kfree(BTRFS_I(inode)->file_extent_tree);
8094 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8095 }
8096 
btrfs_destroy_inode(struct inode * vfs_inode)8097 void btrfs_destroy_inode(struct inode *vfs_inode)
8098 {
8099 	struct btrfs_ordered_extent *ordered;
8100 	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8101 	struct btrfs_root *root = inode->root;
8102 	bool freespace_inode;
8103 
8104 	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8105 	WARN_ON(vfs_inode->i_data.nrpages);
8106 	WARN_ON(inode->block_rsv.reserved);
8107 	WARN_ON(inode->block_rsv.size);
8108 	WARN_ON(inode->outstanding_extents);
8109 	if (!S_ISDIR(vfs_inode->i_mode)) {
8110 		WARN_ON(inode->delalloc_bytes);
8111 		WARN_ON(inode->new_delalloc_bytes);
8112 		WARN_ON(inode->csum_bytes);
8113 	}
8114 	if (!root || !btrfs_is_data_reloc_root(root))
8115 		WARN_ON(inode->defrag_bytes);
8116 
8117 	/*
8118 	 * This can happen where we create an inode, but somebody else also
8119 	 * created the same inode and we need to destroy the one we already
8120 	 * created.
8121 	 */
8122 	if (!root)
8123 		return;
8124 
8125 	/*
8126 	 * If this is a free space inode do not take the ordered extents lockdep
8127 	 * map.
8128 	 */
8129 	freespace_inode = btrfs_is_free_space_inode(inode);
8130 
8131 	while (1) {
8132 		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8133 		if (!ordered)
8134 			break;
8135 		else {
8136 			btrfs_err(root->fs_info,
8137 				  "found ordered extent %llu %llu on inode cleanup",
8138 				  ordered->file_offset, ordered->num_bytes);
8139 
8140 			if (!freespace_inode)
8141 				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8142 
8143 			btrfs_remove_ordered_extent(inode, ordered);
8144 			btrfs_put_ordered_extent(ordered);
8145 			btrfs_put_ordered_extent(ordered);
8146 		}
8147 	}
8148 	btrfs_qgroup_check_reserved_leak(inode);
8149 	btrfs_del_inode_from_root(inode);
8150 	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8151 	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8152 	btrfs_put_root(inode->root);
8153 }
8154 
btrfs_drop_inode(struct inode * inode)8155 int btrfs_drop_inode(struct inode *inode)
8156 {
8157 	struct btrfs_root *root = BTRFS_I(inode)->root;
8158 
8159 	if (root == NULL)
8160 		return 1;
8161 
8162 	/* the snap/subvol tree is on deleting */
8163 	if (btrfs_root_refs(&root->root_item) == 0)
8164 		return 1;
8165 	else
8166 		return inode_generic_drop(inode);
8167 }
8168 
init_once(void * foo)8169 static void init_once(void *foo)
8170 {
8171 	struct btrfs_inode *ei = foo;
8172 
8173 	inode_init_once(&ei->vfs_inode);
8174 }
8175 
btrfs_destroy_cachep(void)8176 void __cold btrfs_destroy_cachep(void)
8177 {
8178 	/*
8179 	 * Make sure all delayed rcu free inodes are flushed before we
8180 	 * destroy cache.
8181 	 */
8182 	rcu_barrier();
8183 	kmem_cache_destroy(btrfs_inode_cachep);
8184 }
8185 
btrfs_init_cachep(void)8186 int __init btrfs_init_cachep(void)
8187 {
8188 	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8189 			sizeof(struct btrfs_inode), 0,
8190 			SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
8191 			init_once);
8192 	if (!btrfs_inode_cachep)
8193 		return -ENOMEM;
8194 
8195 	return 0;
8196 }
8197 
btrfs_getattr(struct mnt_idmap * idmap,const struct path * path,struct kstat * stat,u32 request_mask,unsigned int flags)8198 static int btrfs_getattr(struct mnt_idmap *idmap,
8199 			 const struct path *path, struct kstat *stat,
8200 			 u32 request_mask, unsigned int flags)
8201 {
8202 	u64 delalloc_bytes;
8203 	u64 inode_bytes;
8204 	struct inode *inode = d_inode(path->dentry);
8205 	u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
8206 	u32 bi_flags = BTRFS_I(inode)->flags;
8207 	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8208 
8209 	stat->result_mask |= STATX_BTIME;
8210 	stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
8211 	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
8212 	if (bi_flags & BTRFS_INODE_APPEND)
8213 		stat->attributes |= STATX_ATTR_APPEND;
8214 	if (bi_flags & BTRFS_INODE_COMPRESS)
8215 		stat->attributes |= STATX_ATTR_COMPRESSED;
8216 	if (bi_flags & BTRFS_INODE_IMMUTABLE)
8217 		stat->attributes |= STATX_ATTR_IMMUTABLE;
8218 	if (bi_flags & BTRFS_INODE_NODUMP)
8219 		stat->attributes |= STATX_ATTR_NODUMP;
8220 	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8221 		stat->attributes |= STATX_ATTR_VERITY;
8222 
8223 	stat->attributes_mask |= (STATX_ATTR_APPEND |
8224 				  STATX_ATTR_COMPRESSED |
8225 				  STATX_ATTR_IMMUTABLE |
8226 				  STATX_ATTR_NODUMP);
8227 
8228 	generic_fillattr(idmap, request_mask, inode, stat);
8229 	stat->dev = BTRFS_I(inode)->root->anon_dev;
8230 
8231 	stat->subvol = btrfs_root_id(BTRFS_I(inode)->root);
8232 	stat->result_mask |= STATX_SUBVOL;
8233 
8234 	spin_lock(&BTRFS_I(inode)->lock);
8235 	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8236 	inode_bytes = inode_get_bytes(inode);
8237 	spin_unlock(&BTRFS_I(inode)->lock);
8238 	stat->blocks = (ALIGN(inode_bytes, blocksize) +
8239 			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
8240 	return 0;
8241 }
8242 
btrfs_rename_exchange(struct inode * old_dir,struct dentry * old_dentry,struct inode * new_dir,struct dentry * new_dentry)8243 static int btrfs_rename_exchange(struct inode *old_dir,
8244 			      struct dentry *old_dentry,
8245 			      struct inode *new_dir,
8246 			      struct dentry *new_dentry)
8247 {
8248 	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8249 	struct btrfs_trans_handle *trans;
8250 	unsigned int trans_num_items;
8251 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8252 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8253 	struct inode *new_inode = new_dentry->d_inode;
8254 	struct inode *old_inode = old_dentry->d_inode;
8255 	struct btrfs_rename_ctx old_rename_ctx;
8256 	struct btrfs_rename_ctx new_rename_ctx;
8257 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8258 	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8259 	u64 old_idx = 0;
8260 	u64 new_idx = 0;
8261 	int ret;
8262 	int ret2;
8263 	bool need_abort = false;
8264 	bool logs_pinned = false;
8265 	struct fscrypt_name old_fname, new_fname;
8266 	struct fscrypt_str *old_name, *new_name;
8267 
8268 	/*
8269 	 * For non-subvolumes allow exchange only within one subvolume, in the
8270 	 * same inode namespace. Two subvolumes (represented as directory) can
8271 	 * be exchanged as they're a logical link and have a fixed inode number.
8272 	 */
8273 	if (root != dest &&
8274 	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
8275 	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
8276 		return -EXDEV;
8277 
8278 	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8279 	if (ret)
8280 		return ret;
8281 
8282 	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8283 	if (ret) {
8284 		fscrypt_free_filename(&old_fname);
8285 		return ret;
8286 	}
8287 
8288 	old_name = &old_fname.disk_name;
8289 	new_name = &new_fname.disk_name;
8290 
8291 	/* close the race window with snapshot create/destroy ioctl */
8292 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8293 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
8294 		down_read(&fs_info->subvol_sem);
8295 
8296 	/*
8297 	 * For each inode:
8298 	 * 1 to remove old dir item
8299 	 * 1 to remove old dir index
8300 	 * 1 to add new dir item
8301 	 * 1 to add new dir index
8302 	 * 1 to update parent inode
8303 	 *
8304 	 * If the parents are the same, we only need to account for one
8305 	 */
8306 	trans_num_items = (old_dir == new_dir ? 9 : 10);
8307 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8308 		/*
8309 		 * 1 to remove old root ref
8310 		 * 1 to remove old root backref
8311 		 * 1 to add new root ref
8312 		 * 1 to add new root backref
8313 		 */
8314 		trans_num_items += 4;
8315 	} else {
8316 		/*
8317 		 * 1 to update inode item
8318 		 * 1 to remove old inode ref
8319 		 * 1 to add new inode ref
8320 		 */
8321 		trans_num_items += 3;
8322 	}
8323 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
8324 		trans_num_items += 4;
8325 	else
8326 		trans_num_items += 3;
8327 	trans = btrfs_start_transaction(root, trans_num_items);
8328 	if (IS_ERR(trans)) {
8329 		ret = PTR_ERR(trans);
8330 		goto out_notrans;
8331 	}
8332 
8333 	if (dest != root) {
8334 		ret = btrfs_record_root_in_trans(trans, dest);
8335 		if (ret)
8336 			goto out_fail;
8337 	}
8338 
8339 	/*
8340 	 * We need to find a free sequence number both in the source and
8341 	 * in the destination directory for the exchange.
8342 	 */
8343 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8344 	if (ret)
8345 		goto out_fail;
8346 	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8347 	if (ret)
8348 		goto out_fail;
8349 
8350 	BTRFS_I(old_inode)->dir_index = 0ULL;
8351 	BTRFS_I(new_inode)->dir_index = 0ULL;
8352 
8353 	/* Reference for the source. */
8354 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8355 		/* force full log commit if subvolume involved. */
8356 		btrfs_set_log_full_commit(trans);
8357 	} else {
8358 		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8359 					     btrfs_ino(BTRFS_I(new_dir)),
8360 					     old_idx);
8361 		if (ret)
8362 			goto out_fail;
8363 		need_abort = true;
8364 	}
8365 
8366 	/* And now for the dest. */
8367 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8368 		/* force full log commit if subvolume involved. */
8369 		btrfs_set_log_full_commit(trans);
8370 	} else {
8371 		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8372 					     btrfs_ino(BTRFS_I(old_dir)),
8373 					     new_idx);
8374 		if (ret) {
8375 			if (unlikely(need_abort))
8376 				btrfs_abort_transaction(trans, ret);
8377 			goto out_fail;
8378 		}
8379 	}
8380 
8381 	/* Update inode version and ctime/mtime. */
8382 	inode_inc_iversion(old_dir);
8383 	inode_inc_iversion(new_dir);
8384 	inode_inc_iversion(old_inode);
8385 	inode_inc_iversion(new_inode);
8386 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8387 
8388 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
8389 	    new_ino != BTRFS_FIRST_FREE_OBJECTID) {
8390 		/*
8391 		 * If we are renaming in the same directory (and it's not for
8392 		 * root entries) pin the log early to prevent any concurrent
8393 		 * task from logging the directory after we removed the old
8394 		 * entries and before we add the new entries, otherwise that
8395 		 * task can sync a log without any entry for the inodes we are
8396 		 * renaming and therefore replaying that log, if a power failure
8397 		 * happens after syncing the log, would result in deleting the
8398 		 * inodes.
8399 		 *
8400 		 * If the rename affects two different directories, we want to
8401 		 * make sure the that there's no log commit that contains
8402 		 * updates for only one of the directories but not for the
8403 		 * other.
8404 		 *
8405 		 * If we are renaming an entry for a root, we don't care about
8406 		 * log updates since we called btrfs_set_log_full_commit().
8407 		 */
8408 		btrfs_pin_log_trans(root);
8409 		btrfs_pin_log_trans(dest);
8410 		logs_pinned = true;
8411 	}
8412 
8413 	if (old_dentry->d_parent != new_dentry->d_parent) {
8414 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8415 					BTRFS_I(old_inode), true);
8416 		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8417 					BTRFS_I(new_inode), true);
8418 	}
8419 
8420 	/* src is a subvolume */
8421 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8422 		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8423 		if (unlikely(ret)) {
8424 			btrfs_abort_transaction(trans, ret);
8425 			goto out_fail;
8426 		}
8427 	} else { /* src is an inode */
8428 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8429 					   BTRFS_I(old_dentry->d_inode),
8430 					   old_name, &old_rename_ctx);
8431 		if (unlikely(ret)) {
8432 			btrfs_abort_transaction(trans, ret);
8433 			goto out_fail;
8434 		}
8435 		ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8436 		if (unlikely(ret)) {
8437 			btrfs_abort_transaction(trans, ret);
8438 			goto out_fail;
8439 		}
8440 	}
8441 
8442 	/* dest is a subvolume */
8443 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8444 		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8445 		if (unlikely(ret)) {
8446 			btrfs_abort_transaction(trans, ret);
8447 			goto out_fail;
8448 		}
8449 	} else { /* dest is an inode */
8450 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8451 					   BTRFS_I(new_dentry->d_inode),
8452 					   new_name, &new_rename_ctx);
8453 		if (unlikely(ret)) {
8454 			btrfs_abort_transaction(trans, ret);
8455 			goto out_fail;
8456 		}
8457 		ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8458 		if (unlikely(ret)) {
8459 			btrfs_abort_transaction(trans, ret);
8460 			goto out_fail;
8461 		}
8462 	}
8463 
8464 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8465 			     new_name, 0, old_idx);
8466 	if (unlikely(ret)) {
8467 		btrfs_abort_transaction(trans, ret);
8468 		goto out_fail;
8469 	}
8470 
8471 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8472 			     old_name, 0, new_idx);
8473 	if (unlikely(ret)) {
8474 		btrfs_abort_transaction(trans, ret);
8475 		goto out_fail;
8476 	}
8477 
8478 	if (old_inode->i_nlink == 1)
8479 		BTRFS_I(old_inode)->dir_index = old_idx;
8480 	if (new_inode->i_nlink == 1)
8481 		BTRFS_I(new_inode)->dir_index = new_idx;
8482 
8483 	/*
8484 	 * Do the log updates for all inodes.
8485 	 *
8486 	 * If either entry is for a root we don't need to update the logs since
8487 	 * we've called btrfs_set_log_full_commit() before.
8488 	 */
8489 	if (logs_pinned) {
8490 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8491 				   old_rename_ctx.index, new_dentry->d_parent);
8492 		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
8493 				   new_rename_ctx.index, old_dentry->d_parent);
8494 	}
8495 
8496 out_fail:
8497 	if (logs_pinned) {
8498 		btrfs_end_log_trans(root);
8499 		btrfs_end_log_trans(dest);
8500 	}
8501 	ret2 = btrfs_end_transaction(trans);
8502 	ret = ret ? ret : ret2;
8503 out_notrans:
8504 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
8505 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
8506 		up_read(&fs_info->subvol_sem);
8507 
8508 	fscrypt_free_filename(&new_fname);
8509 	fscrypt_free_filename(&old_fname);
8510 	return ret;
8511 }
8512 
new_whiteout_inode(struct mnt_idmap * idmap,struct inode * dir)8513 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
8514 					struct inode *dir)
8515 {
8516 	struct inode *inode;
8517 
8518 	inode = new_inode(dir->i_sb);
8519 	if (inode) {
8520 		inode_init_owner(idmap, inode, dir,
8521 				 S_IFCHR | WHITEOUT_MODE);
8522 		inode->i_op = &btrfs_special_inode_operations;
8523 		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
8524 	}
8525 	return inode;
8526 }
8527 
btrfs_rename(struct mnt_idmap * idmap,struct inode * old_dir,struct dentry * old_dentry,struct inode * new_dir,struct dentry * new_dentry,unsigned int flags)8528 static int btrfs_rename(struct mnt_idmap *idmap,
8529 			struct inode *old_dir, struct dentry *old_dentry,
8530 			struct inode *new_dir, struct dentry *new_dentry,
8531 			unsigned int flags)
8532 {
8533 	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8534 	struct btrfs_new_inode_args whiteout_args = {
8535 		.dir = old_dir,
8536 		.dentry = old_dentry,
8537 	};
8538 	struct btrfs_trans_handle *trans;
8539 	unsigned int trans_num_items;
8540 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
8541 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8542 	struct inode *new_inode = d_inode(new_dentry);
8543 	struct inode *old_inode = d_inode(old_dentry);
8544 	struct btrfs_rename_ctx rename_ctx;
8545 	u64 index = 0;
8546 	int ret;
8547 	int ret2;
8548 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8549 	struct fscrypt_name old_fname, new_fname;
8550 	bool logs_pinned = false;
8551 
8552 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8553 		return -EPERM;
8554 
8555 	/* we only allow rename subvolume link between subvolumes */
8556 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8557 		return -EXDEV;
8558 
8559 	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8560 	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
8561 		return -ENOTEMPTY;
8562 
8563 	if (S_ISDIR(old_inode->i_mode) && new_inode &&
8564 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8565 		return -ENOTEMPTY;
8566 
8567 	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8568 	if (ret)
8569 		return ret;
8570 
8571 	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8572 	if (ret) {
8573 		fscrypt_free_filename(&old_fname);
8574 		return ret;
8575 	}
8576 
8577 	/* check for collisions, even if the  name isn't there */
8578 	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
8579 	if (ret) {
8580 		if (ret == -EEXIST) {
8581 			/* we shouldn't get
8582 			 * eexist without a new_inode */
8583 			if (WARN_ON(!new_inode)) {
8584 				goto out_fscrypt_names;
8585 			}
8586 		} else {
8587 			/* maybe -EOVERFLOW */
8588 			goto out_fscrypt_names;
8589 		}
8590 	}
8591 	ret = 0;
8592 
8593 	/*
8594 	 * we're using rename to replace one file with another.  Start IO on it
8595 	 * now so  we don't add too much work to the end of the transaction
8596 	 */
8597 	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
8598 		filemap_flush(old_inode->i_mapping);
8599 
8600 	if (flags & RENAME_WHITEOUT) {
8601 		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
8602 		if (!whiteout_args.inode) {
8603 			ret = -ENOMEM;
8604 			goto out_fscrypt_names;
8605 		}
8606 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
8607 		if (ret)
8608 			goto out_whiteout_inode;
8609 	} else {
8610 		/* 1 to update the old parent inode. */
8611 		trans_num_items = 1;
8612 	}
8613 
8614 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8615 		/* Close the race window with snapshot create/destroy ioctl */
8616 		down_read(&fs_info->subvol_sem);
8617 		/*
8618 		 * 1 to remove old root ref
8619 		 * 1 to remove old root backref
8620 		 * 1 to add new root ref
8621 		 * 1 to add new root backref
8622 		 */
8623 		trans_num_items += 4;
8624 	} else {
8625 		/*
8626 		 * 1 to update inode
8627 		 * 1 to remove old inode ref
8628 		 * 1 to add new inode ref
8629 		 */
8630 		trans_num_items += 3;
8631 	}
8632 	/*
8633 	 * 1 to remove old dir item
8634 	 * 1 to remove old dir index
8635 	 * 1 to add new dir item
8636 	 * 1 to add new dir index
8637 	 */
8638 	trans_num_items += 4;
8639 	/* 1 to update new parent inode if it's not the same as the old parent */
8640 	if (new_dir != old_dir)
8641 		trans_num_items++;
8642 	if (new_inode) {
8643 		/*
8644 		 * 1 to update inode
8645 		 * 1 to remove inode ref
8646 		 * 1 to remove dir item
8647 		 * 1 to remove dir index
8648 		 * 1 to possibly add orphan item
8649 		 */
8650 		trans_num_items += 5;
8651 	}
8652 	trans = btrfs_start_transaction(root, trans_num_items);
8653 	if (IS_ERR(trans)) {
8654 		ret = PTR_ERR(trans);
8655 		goto out_notrans;
8656 	}
8657 
8658 	if (dest != root) {
8659 		ret = btrfs_record_root_in_trans(trans, dest);
8660 		if (ret)
8661 			goto out_fail;
8662 	}
8663 
8664 	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
8665 	if (ret)
8666 		goto out_fail;
8667 
8668 	BTRFS_I(old_inode)->dir_index = 0ULL;
8669 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8670 		/* force full log commit if subvolume involved. */
8671 		btrfs_set_log_full_commit(trans);
8672 	} else {
8673 		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
8674 					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
8675 					     index);
8676 		if (ret)
8677 			goto out_fail;
8678 	}
8679 
8680 	inode_inc_iversion(old_dir);
8681 	inode_inc_iversion(new_dir);
8682 	inode_inc_iversion(old_inode);
8683 	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8684 
8685 	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
8686 		/*
8687 		 * If we are renaming in the same directory (and it's not a
8688 		 * root entry) pin the log to prevent any concurrent task from
8689 		 * logging the directory after we removed the old entry and
8690 		 * before we add the new entry, otherwise that task can sync
8691 		 * a log without any entry for the inode we are renaming and
8692 		 * therefore replaying that log, if a power failure happens
8693 		 * after syncing the log, would result in deleting the inode.
8694 		 *
8695 		 * If the rename affects two different directories, we want to
8696 		 * make sure the that there's no log commit that contains
8697 		 * updates for only one of the directories but not for the
8698 		 * other.
8699 		 *
8700 		 * If we are renaming an entry for a root, we don't care about
8701 		 * log updates since we called btrfs_set_log_full_commit().
8702 		 */
8703 		btrfs_pin_log_trans(root);
8704 		btrfs_pin_log_trans(dest);
8705 		logs_pinned = true;
8706 	}
8707 
8708 	if (old_dentry->d_parent != new_dentry->d_parent)
8709 		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8710 					BTRFS_I(old_inode), true);
8711 
8712 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8713 		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8714 		if (unlikely(ret)) {
8715 			btrfs_abort_transaction(trans, ret);
8716 			goto out_fail;
8717 		}
8718 	} else {
8719 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8720 					   BTRFS_I(d_inode(old_dentry)),
8721 					   &old_fname.disk_name, &rename_ctx);
8722 		if (unlikely(ret)) {
8723 			btrfs_abort_transaction(trans, ret);
8724 			goto out_fail;
8725 		}
8726 		ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8727 		if (unlikely(ret)) {
8728 			btrfs_abort_transaction(trans, ret);
8729 			goto out_fail;
8730 		}
8731 	}
8732 
8733 	if (new_inode) {
8734 		inode_inc_iversion(new_inode);
8735 		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
8736 			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8737 			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8738 			if (unlikely(ret)) {
8739 				btrfs_abort_transaction(trans, ret);
8740 				goto out_fail;
8741 			}
8742 			BUG_ON(new_inode->i_nlink == 0);
8743 		} else {
8744 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8745 						 BTRFS_I(d_inode(new_dentry)),
8746 						 &new_fname.disk_name);
8747 			if (unlikely(ret)) {
8748 				btrfs_abort_transaction(trans, ret);
8749 				goto out_fail;
8750 			}
8751 		}
8752 		if (new_inode->i_nlink == 0) {
8753 			ret = btrfs_orphan_add(trans,
8754 					BTRFS_I(d_inode(new_dentry)));
8755 			if (unlikely(ret)) {
8756 				btrfs_abort_transaction(trans, ret);
8757 				goto out_fail;
8758 			}
8759 		}
8760 	}
8761 
8762 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8763 			     &new_fname.disk_name, 0, index);
8764 	if (unlikely(ret)) {
8765 		btrfs_abort_transaction(trans, ret);
8766 		goto out_fail;
8767 	}
8768 
8769 	if (old_inode->i_nlink == 1)
8770 		BTRFS_I(old_inode)->dir_index = index;
8771 
8772 	if (logs_pinned)
8773 		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8774 				   rename_ctx.index, new_dentry->d_parent);
8775 
8776 	if (flags & RENAME_WHITEOUT) {
8777 		ret = btrfs_create_new_inode(trans, &whiteout_args);
8778 		if (unlikely(ret)) {
8779 			btrfs_abort_transaction(trans, ret);
8780 			goto out_fail;
8781 		} else {
8782 			unlock_new_inode(whiteout_args.inode);
8783 			iput(whiteout_args.inode);
8784 			whiteout_args.inode = NULL;
8785 		}
8786 	}
8787 out_fail:
8788 	if (logs_pinned) {
8789 		btrfs_end_log_trans(root);
8790 		btrfs_end_log_trans(dest);
8791 	}
8792 	ret2 = btrfs_end_transaction(trans);
8793 	ret = ret ? ret : ret2;
8794 out_notrans:
8795 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8796 		up_read(&fs_info->subvol_sem);
8797 	if (flags & RENAME_WHITEOUT)
8798 		btrfs_new_inode_args_destroy(&whiteout_args);
8799 out_whiteout_inode:
8800 	if (flags & RENAME_WHITEOUT)
8801 		iput(whiteout_args.inode);
8802 out_fscrypt_names:
8803 	fscrypt_free_filename(&old_fname);
8804 	fscrypt_free_filename(&new_fname);
8805 	return ret;
8806 }
8807 
btrfs_rename2(struct mnt_idmap * idmap,struct inode * old_dir,struct dentry * old_dentry,struct inode * new_dir,struct dentry * new_dentry,unsigned int flags)8808 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
8809 			 struct dentry *old_dentry, struct inode *new_dir,
8810 			 struct dentry *new_dentry, unsigned int flags)
8811 {
8812 	int ret;
8813 
8814 	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
8815 		return -EINVAL;
8816 
8817 	if (flags & RENAME_EXCHANGE)
8818 		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
8819 					    new_dentry);
8820 	else
8821 		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
8822 				   new_dentry, flags);
8823 
8824 	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
8825 
8826 	return ret;
8827 }
8828 
8829 struct btrfs_delalloc_work {
8830 	struct inode *inode;
8831 	struct completion completion;
8832 	struct list_head list;
8833 	struct btrfs_work work;
8834 };
8835 
btrfs_run_delalloc_work(struct btrfs_work * work)8836 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8837 {
8838 	struct btrfs_delalloc_work *delalloc_work;
8839 	struct inode *inode;
8840 
8841 	delalloc_work = container_of(work, struct btrfs_delalloc_work,
8842 				     work);
8843 	inode = delalloc_work->inode;
8844 	filemap_flush(inode->i_mapping);
8845 	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8846 				&BTRFS_I(inode)->runtime_flags))
8847 		filemap_flush(inode->i_mapping);
8848 
8849 	iput(inode);
8850 	complete(&delalloc_work->completion);
8851 }
8852 
btrfs_alloc_delalloc_work(struct inode * inode)8853 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
8854 {
8855 	struct btrfs_delalloc_work *work;
8856 
8857 	work = kmalloc_obj(*work, GFP_NOFS);
8858 	if (!work)
8859 		return NULL;
8860 
8861 	init_completion(&work->completion);
8862 	INIT_LIST_HEAD(&work->list);
8863 	work->inode = inode;
8864 	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
8865 
8866 	return work;
8867 }
8868 
8869 /*
8870  * some fairly slow code that needs optimization. This walks the list
8871  * of all the inodes with pending delalloc and forces them to disk.
8872  */
start_delalloc_inodes(struct btrfs_root * root,long * nr_to_write,bool snapshot,bool in_reclaim_context)8873 static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write,
8874 				 bool snapshot, bool in_reclaim_context)
8875 {
8876 	struct btrfs_delalloc_work *work, *next;
8877 	LIST_HEAD(works);
8878 	LIST_HEAD(splice);
8879 	int ret = 0;
8880 
8881 	mutex_lock(&root->delalloc_mutex);
8882 	spin_lock(&root->delalloc_lock);
8883 	list_splice_init(&root->delalloc_inodes, &splice);
8884 	while (!list_empty(&splice)) {
8885 		struct btrfs_inode *inode;
8886 		struct inode *tmp_inode;
8887 
8888 		inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes);
8889 
8890 		list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
8891 
8892 		if (in_reclaim_context &&
8893 		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags))
8894 			continue;
8895 
8896 		tmp_inode = igrab(&inode->vfs_inode);
8897 		if (!tmp_inode) {
8898 			cond_resched_lock(&root->delalloc_lock);
8899 			continue;
8900 		}
8901 		spin_unlock(&root->delalloc_lock);
8902 
8903 		if (snapshot)
8904 			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags);
8905 		if (nr_to_write == NULL) {
8906 			work = btrfs_alloc_delalloc_work(tmp_inode);
8907 			if (!work) {
8908 				iput(tmp_inode);
8909 				ret = -ENOMEM;
8910 				goto out;
8911 			}
8912 			list_add_tail(&work->list, &works);
8913 			btrfs_queue_work(root->fs_info->flush_workers,
8914 					 &work->work);
8915 		} else {
8916 			ret = filemap_flush_nr(tmp_inode->i_mapping,
8917 					nr_to_write);
8918 			btrfs_add_delayed_iput(inode);
8919 
8920 			if (ret || *nr_to_write <= 0)
8921 				goto out;
8922 		}
8923 		cond_resched();
8924 		spin_lock(&root->delalloc_lock);
8925 	}
8926 	spin_unlock(&root->delalloc_lock);
8927 
8928 out:
8929 	list_for_each_entry_safe(work, next, &works, list) {
8930 		list_del_init(&work->list);
8931 		wait_for_completion(&work->completion);
8932 		kfree(work);
8933 	}
8934 
8935 	if (!list_empty(&splice)) {
8936 		spin_lock(&root->delalloc_lock);
8937 		list_splice_tail(&splice, &root->delalloc_inodes);
8938 		spin_unlock(&root->delalloc_lock);
8939 	}
8940 	mutex_unlock(&root->delalloc_mutex);
8941 	return ret;
8942 }
8943 
btrfs_start_delalloc_snapshot(struct btrfs_root * root,bool in_reclaim_context)8944 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
8945 {
8946 	struct btrfs_fs_info *fs_info = root->fs_info;
8947 
8948 	if (BTRFS_FS_ERROR(fs_info))
8949 		return -EROFS;
8950 	return start_delalloc_inodes(root, NULL, true, in_reclaim_context);
8951 }
8952 
btrfs_start_delalloc_roots(struct btrfs_fs_info * fs_info,long nr,bool in_reclaim_context)8953 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
8954 			       bool in_reclaim_context)
8955 {
8956 	long *nr_to_write = nr == LONG_MAX ? NULL : &nr;
8957 	struct btrfs_root *root;
8958 	LIST_HEAD(splice);
8959 	int ret;
8960 
8961 	if (BTRFS_FS_ERROR(fs_info))
8962 		return -EROFS;
8963 
8964 	mutex_lock(&fs_info->delalloc_root_mutex);
8965 	spin_lock(&fs_info->delalloc_root_lock);
8966 	list_splice_init(&fs_info->delalloc_roots, &splice);
8967 	while (!list_empty(&splice)) {
8968 		root = list_first_entry(&splice, struct btrfs_root,
8969 					delalloc_root);
8970 		root = btrfs_grab_root(root);
8971 		BUG_ON(!root);
8972 		list_move_tail(&root->delalloc_root,
8973 			       &fs_info->delalloc_roots);
8974 		spin_unlock(&fs_info->delalloc_root_lock);
8975 
8976 		ret = start_delalloc_inodes(root, nr_to_write, false,
8977 				in_reclaim_context);
8978 		btrfs_put_root(root);
8979 		if (ret < 0 || nr <= 0)
8980 			goto out;
8981 		spin_lock(&fs_info->delalloc_root_lock);
8982 	}
8983 	spin_unlock(&fs_info->delalloc_root_lock);
8984 
8985 	ret = 0;
8986 out:
8987 	if (!list_empty(&splice)) {
8988 		spin_lock(&fs_info->delalloc_root_lock);
8989 		list_splice_tail(&splice, &fs_info->delalloc_roots);
8990 		spin_unlock(&fs_info->delalloc_root_lock);
8991 	}
8992 	mutex_unlock(&fs_info->delalloc_root_mutex);
8993 	return ret;
8994 }
8995 
btrfs_symlink(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,const char * symname)8996 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
8997 			 struct dentry *dentry, const char *symname)
8998 {
8999 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9000 	struct btrfs_trans_handle *trans;
9001 	struct btrfs_root *root = BTRFS_I(dir)->root;
9002 	struct btrfs_path *path;
9003 	struct btrfs_key key;
9004 	struct inode *inode;
9005 	struct btrfs_new_inode_args new_inode_args = {
9006 		.dir = dir,
9007 		.dentry = dentry,
9008 	};
9009 	unsigned int trans_num_items;
9010 	int ret;
9011 	int name_len;
9012 	int datasize;
9013 	unsigned long ptr;
9014 	struct btrfs_file_extent_item *ei;
9015 	struct extent_buffer *leaf;
9016 
9017 	name_len = strlen(symname);
9018 	/*
9019 	 * Symlinks utilize uncompressed inline extent data, which should not
9020 	 * reach block size.
9021 	 */
9022 	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
9023 	    name_len >= fs_info->sectorsize)
9024 		return -ENAMETOOLONG;
9025 
9026 	inode = new_inode(dir->i_sb);
9027 	if (!inode)
9028 		return -ENOMEM;
9029 	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
9030 	inode->i_op = &btrfs_symlink_inode_operations;
9031 	inode_nohighmem(inode);
9032 	inode->i_mapping->a_ops = &btrfs_aops;
9033 	btrfs_i_size_write(BTRFS_I(inode), name_len);
9034 	inode_set_bytes(inode, name_len);
9035 
9036 	new_inode_args.inode = inode;
9037 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9038 	if (ret)
9039 		goto out_inode;
9040 	/* 1 additional item for the inline extent */
9041 	trans_num_items++;
9042 
9043 	trans = btrfs_start_transaction(root, trans_num_items);
9044 	if (IS_ERR(trans)) {
9045 		ret = PTR_ERR(trans);
9046 		goto out_new_inode_args;
9047 	}
9048 
9049 	ret = btrfs_create_new_inode(trans, &new_inode_args);
9050 	if (ret)
9051 		goto out;
9052 
9053 	path = btrfs_alloc_path();
9054 	if (unlikely(!path)) {
9055 		ret = -ENOMEM;
9056 		btrfs_abort_transaction(trans, ret);
9057 		discard_new_inode(inode);
9058 		inode = NULL;
9059 		goto out;
9060 	}
9061 	key.objectid = btrfs_ino(BTRFS_I(inode));
9062 	key.type = BTRFS_EXTENT_DATA_KEY;
9063 	key.offset = 0;
9064 	datasize = btrfs_file_extent_calc_inline_size(name_len);
9065 	ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
9066 	if (unlikely(ret)) {
9067 		btrfs_abort_transaction(trans, ret);
9068 		btrfs_free_path(path);
9069 		discard_new_inode(inode);
9070 		inode = NULL;
9071 		goto out;
9072 	}
9073 	leaf = path->nodes[0];
9074 	ei = btrfs_item_ptr(leaf, path->slots[0],
9075 			    struct btrfs_file_extent_item);
9076 	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9077 	btrfs_set_file_extent_type(leaf, ei,
9078 				   BTRFS_FILE_EXTENT_INLINE);
9079 	btrfs_set_file_extent_encryption(leaf, ei, 0);
9080 	btrfs_set_file_extent_compression(leaf, ei, 0);
9081 	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9082 	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9083 
9084 	ptr = btrfs_file_extent_inline_start(ei);
9085 	write_extent_buffer(leaf, symname, ptr, name_len);
9086 	btrfs_free_path(path);
9087 
9088 	d_instantiate_new(dentry, inode);
9089 	ret = 0;
9090 out:
9091 	btrfs_end_transaction(trans);
9092 	btrfs_btree_balance_dirty(fs_info);
9093 out_new_inode_args:
9094 	btrfs_new_inode_args_destroy(&new_inode_args);
9095 out_inode:
9096 	if (ret)
9097 		iput(inode);
9098 	return ret;
9099 }
9100 
insert_prealloc_file_extent(struct btrfs_trans_handle * trans_in,struct btrfs_inode * inode,struct btrfs_key * ins,u64 file_offset)9101 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9102 				       struct btrfs_trans_handle *trans_in,
9103 				       struct btrfs_inode *inode,
9104 				       struct btrfs_key *ins,
9105 				       u64 file_offset)
9106 {
9107 	struct btrfs_file_extent_item stack_fi;
9108 	struct btrfs_replace_extent_info extent_info;
9109 	struct btrfs_trans_handle *trans = trans_in;
9110 	struct btrfs_path *path;
9111 	u64 start = ins->objectid;
9112 	u64 len = ins->offset;
9113 	u64 qgroup_released = 0;
9114 	int ret;
9115 
9116 	memset(&stack_fi, 0, sizeof(stack_fi));
9117 
9118 	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9119 	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9120 	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9121 	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9122 	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9123 	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9124 	/* Encryption and other encoding is reserved and all 0 */
9125 
9126 	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
9127 	if (ret < 0)
9128 		return ERR_PTR(ret);
9129 
9130 	if (trans) {
9131 		ret = insert_reserved_file_extent(trans, inode,
9132 						  file_offset, &stack_fi,
9133 						  true, qgroup_released);
9134 		if (ret)
9135 			goto free_qgroup;
9136 		return trans;
9137 	}
9138 
9139 	extent_info.disk_offset = start;
9140 	extent_info.disk_len = len;
9141 	extent_info.data_offset = 0;
9142 	extent_info.data_len = len;
9143 	extent_info.file_offset = file_offset;
9144 	extent_info.extent_buf = (char *)&stack_fi;
9145 	extent_info.is_new_extent = true;
9146 	extent_info.update_times = true;
9147 	extent_info.qgroup_reserved = qgroup_released;
9148 	extent_info.insertions = 0;
9149 
9150 	path = btrfs_alloc_path();
9151 	if (!path) {
9152 		ret = -ENOMEM;
9153 		goto free_qgroup;
9154 	}
9155 
9156 	ret = btrfs_replace_file_extents(inode, path, file_offset,
9157 				     file_offset + len - 1, &extent_info,
9158 				     &trans);
9159 	btrfs_free_path(path);
9160 	if (ret)
9161 		goto free_qgroup;
9162 	return trans;
9163 
9164 free_qgroup:
9165 	/*
9166 	 * We have released qgroup data range at the beginning of the function,
9167 	 * and normally qgroup_released bytes will be freed when committing
9168 	 * transaction.
9169 	 * But if we error out early, we have to free what we have released
9170 	 * or we leak qgroup data reservation.
9171 	 */
9172 	btrfs_qgroup_free_refroot(inode->root->fs_info,
9173 			btrfs_root_id(inode->root), qgroup_released,
9174 			BTRFS_QGROUP_RSV_DATA);
9175 	return ERR_PTR(ret);
9176 }
9177 
__btrfs_prealloc_file_range(struct inode * inode,int mode,u64 start,u64 num_bytes,u64 min_size,loff_t actual_len,u64 * alloc_hint,struct btrfs_trans_handle * trans)9178 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9179 				       u64 start, u64 num_bytes, u64 min_size,
9180 				       loff_t actual_len, u64 *alloc_hint,
9181 				       struct btrfs_trans_handle *trans)
9182 {
9183 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
9184 	struct extent_map *em;
9185 	struct btrfs_root *root = BTRFS_I(inode)->root;
9186 	struct btrfs_key ins;
9187 	u64 cur_offset = start;
9188 	u64 clear_offset = start;
9189 	u64 i_size;
9190 	u64 cur_bytes;
9191 	u64 last_alloc = (u64)-1;
9192 	int ret = 0;
9193 	bool own_trans = true;
9194 	u64 end = start + num_bytes - 1;
9195 
9196 	if (trans)
9197 		own_trans = false;
9198 	while (num_bytes > 0) {
9199 		cur_bytes = min_t(u64, num_bytes, SZ_256M);
9200 		cur_bytes = max(cur_bytes, min_size);
9201 		/*
9202 		 * If we are severely fragmented we could end up with really
9203 		 * small allocations, so if the allocator is returning small
9204 		 * chunks lets make its job easier by only searching for those
9205 		 * sized chunks.
9206 		 */
9207 		cur_bytes = min(cur_bytes, last_alloc);
9208 		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9209 				min_size, 0, *alloc_hint, &ins, true, false);
9210 		if (ret)
9211 			break;
9212 
9213 		/*
9214 		 * We've reserved this space, and thus converted it from
9215 		 * ->bytes_may_use to ->bytes_reserved.  Any error that happens
9216 		 * from here on out we will only need to clear our reservation
9217 		 * for the remaining unreserved area, so advance our
9218 		 * clear_offset by our extent size.
9219 		 */
9220 		clear_offset += ins.offset;
9221 
9222 		last_alloc = ins.offset;
9223 		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9224 						    &ins, cur_offset);
9225 		/*
9226 		 * Now that we inserted the prealloc extent we can finally
9227 		 * decrement the number of reservations in the block group.
9228 		 * If we did it before, we could race with relocation and have
9229 		 * relocation miss the reserved extent, making it fail later.
9230 		 */
9231 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9232 		if (IS_ERR(trans)) {
9233 			ret = PTR_ERR(trans);
9234 			btrfs_free_reserved_extent(fs_info, ins.objectid,
9235 						   ins.offset, false);
9236 			break;
9237 		}
9238 
9239 		em = btrfs_alloc_extent_map();
9240 		if (!em) {
9241 			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
9242 					    cur_offset + ins.offset - 1, false);
9243 			btrfs_set_inode_full_sync(BTRFS_I(inode));
9244 			goto next;
9245 		}
9246 
9247 		em->start = cur_offset;
9248 		em->len = ins.offset;
9249 		em->disk_bytenr = ins.objectid;
9250 		em->offset = 0;
9251 		em->disk_num_bytes = ins.offset;
9252 		em->ram_bytes = ins.offset;
9253 		em->flags |= EXTENT_FLAG_PREALLOC;
9254 		em->generation = trans->transid;
9255 
9256 		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
9257 		btrfs_free_extent_map(em);
9258 next:
9259 		num_bytes -= ins.offset;
9260 		cur_offset += ins.offset;
9261 		*alloc_hint = ins.objectid + ins.offset;
9262 
9263 		inode_inc_iversion(inode);
9264 		inode_set_ctime_current(inode);
9265 		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9266 		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9267 		    (actual_len > inode->i_size) &&
9268 		    (cur_offset > inode->i_size)) {
9269 			if (cur_offset > actual_len)
9270 				i_size = actual_len;
9271 			else
9272 				i_size = cur_offset;
9273 			i_size_write(inode, i_size);
9274 			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
9275 		}
9276 
9277 		ret = btrfs_update_inode(trans, BTRFS_I(inode));
9278 
9279 		if (unlikely(ret)) {
9280 			btrfs_abort_transaction(trans, ret);
9281 			if (own_trans)
9282 				btrfs_end_transaction(trans);
9283 			break;
9284 		}
9285 
9286 		if (own_trans) {
9287 			btrfs_end_transaction(trans);
9288 			trans = NULL;
9289 		}
9290 	}
9291 	if (clear_offset < end)
9292 		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
9293 			end - clear_offset + 1);
9294 	return ret;
9295 }
9296 
btrfs_prealloc_file_range(struct inode * inode,int mode,u64 start,u64 num_bytes,u64 min_size,loff_t actual_len,u64 * alloc_hint)9297 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9298 			      u64 start, u64 num_bytes, u64 min_size,
9299 			      loff_t actual_len, u64 *alloc_hint)
9300 {
9301 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9302 					   min_size, actual_len, alloc_hint,
9303 					   NULL);
9304 }
9305 
btrfs_prealloc_file_range_trans(struct inode * inode,struct btrfs_trans_handle * trans,int mode,u64 start,u64 num_bytes,u64 min_size,loff_t actual_len,u64 * alloc_hint)9306 int btrfs_prealloc_file_range_trans(struct inode *inode,
9307 				    struct btrfs_trans_handle *trans, int mode,
9308 				    u64 start, u64 num_bytes, u64 min_size,
9309 				    loff_t actual_len, u64 *alloc_hint)
9310 {
9311 	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9312 					   min_size, actual_len, alloc_hint, trans);
9313 }
9314 
9315 /*
9316  * NOTE: in case you are adding MAY_EXEC check for directories:
9317  * we are marking them with IOP_FASTPERM_MAY_EXEC, allowing path lookup to
9318  * elide calls here.
9319  */
btrfs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)9320 static int btrfs_permission(struct mnt_idmap *idmap,
9321 			    struct inode *inode, int mask)
9322 {
9323 	struct btrfs_root *root = BTRFS_I(inode)->root;
9324 	umode_t mode = inode->i_mode;
9325 
9326 	if (mask & MAY_WRITE &&
9327 	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9328 		if (btrfs_root_readonly(root))
9329 			return -EROFS;
9330 		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9331 			return -EACCES;
9332 	}
9333 	return generic_permission(idmap, inode, mask);
9334 }
9335 
btrfs_tmpfile(struct mnt_idmap * idmap,struct inode * dir,struct file * file,umode_t mode)9336 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
9337 			 struct file *file, umode_t mode)
9338 {
9339 	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9340 	struct btrfs_trans_handle *trans;
9341 	struct btrfs_root *root = BTRFS_I(dir)->root;
9342 	struct inode *inode;
9343 	struct btrfs_new_inode_args new_inode_args = {
9344 		.dir = dir,
9345 		.dentry = file->f_path.dentry,
9346 		.orphan = true,
9347 	};
9348 	unsigned int trans_num_items;
9349 	int ret;
9350 
9351 	inode = new_inode(dir->i_sb);
9352 	if (!inode)
9353 		return -ENOMEM;
9354 	inode_init_owner(idmap, inode, dir, mode);
9355 	inode->i_fop = &btrfs_file_operations;
9356 	inode->i_op = &btrfs_file_inode_operations;
9357 	inode->i_mapping->a_ops = &btrfs_aops;
9358 
9359 	new_inode_args.inode = inode;
9360 	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9361 	if (ret)
9362 		goto out_inode;
9363 
9364 	trans = btrfs_start_transaction(root, trans_num_items);
9365 	if (IS_ERR(trans)) {
9366 		ret = PTR_ERR(trans);
9367 		goto out_new_inode_args;
9368 	}
9369 
9370 	ret = btrfs_create_new_inode(trans, &new_inode_args);
9371 
9372 	/*
9373 	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
9374 	 * set it to 1 because d_tmpfile() will issue a warning if the count is
9375 	 * 0, through:
9376 	 *
9377 	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9378 	 */
9379 	set_nlink(inode, 1);
9380 
9381 	if (!ret) {
9382 		d_tmpfile(file, inode);
9383 		unlock_new_inode(inode);
9384 		mark_inode_dirty(inode);
9385 	}
9386 
9387 	btrfs_end_transaction(trans);
9388 	btrfs_btree_balance_dirty(fs_info);
9389 out_new_inode_args:
9390 	btrfs_new_inode_args_destroy(&new_inode_args);
9391 out_inode:
9392 	if (ret)
9393 		iput(inode);
9394 	return finish_open_simple(file, ret);
9395 }
9396 
btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info * fs_info,int compress_type)9397 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
9398 					     int compress_type)
9399 {
9400 	switch (compress_type) {
9401 	case BTRFS_COMPRESS_NONE:
9402 		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
9403 	case BTRFS_COMPRESS_ZLIB:
9404 		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
9405 	case BTRFS_COMPRESS_LZO:
9406 		/*
9407 		 * The LZO format depends on the sector size. 64K is the maximum
9408 		 * sector size that we support.
9409 		 */
9410 		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
9411 			return -EINVAL;
9412 		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
9413 		       (fs_info->sectorsize_bits - 12);
9414 	case BTRFS_COMPRESS_ZSTD:
9415 		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
9416 	default:
9417 		return -EUCLEAN;
9418 	}
9419 }
9420 
btrfs_encoded_read_inline(struct kiocb * iocb,struct iov_iter * iter,u64 start,u64 lockend,struct extent_state ** cached_state,u64 extent_start,size_t count,struct btrfs_ioctl_encoded_io_args * encoded,bool * unlocked)9421 static ssize_t btrfs_encoded_read_inline(
9422 				struct kiocb *iocb,
9423 				struct iov_iter *iter, u64 start,
9424 				u64 lockend,
9425 				struct extent_state **cached_state,
9426 				u64 extent_start, size_t count,
9427 				struct btrfs_ioctl_encoded_io_args *encoded,
9428 				bool *unlocked)
9429 {
9430 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9431 	struct btrfs_root *root = inode->root;
9432 	struct btrfs_fs_info *fs_info = root->fs_info;
9433 	struct extent_io_tree *io_tree = &inode->io_tree;
9434 	BTRFS_PATH_AUTO_FREE(path);
9435 	struct extent_buffer *leaf;
9436 	struct btrfs_file_extent_item *item;
9437 	u64 ram_bytes;
9438 	unsigned long ptr;
9439 	void *tmp;
9440 	ssize_t ret;
9441 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
9442 
9443 	path = btrfs_alloc_path();
9444 	if (!path)
9445 		return -ENOMEM;
9446 
9447 	path->nowait = nowait;
9448 
9449 	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9450 				       extent_start, 0);
9451 	if (ret) {
9452 		if (unlikely(ret > 0)) {
9453 			/* The extent item disappeared? */
9454 			return -EIO;
9455 		}
9456 		return ret;
9457 	}
9458 	leaf = path->nodes[0];
9459 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9460 
9461 	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9462 	ptr = btrfs_file_extent_inline_start(item);
9463 
9464 	encoded->len = min_t(u64, extent_start + ram_bytes,
9465 			     inode->vfs_inode.i_size) - iocb->ki_pos;
9466 	ret = btrfs_encoded_io_compression_from_extent(fs_info,
9467 				 btrfs_file_extent_compression(leaf, item));
9468 	if (ret < 0)
9469 		return ret;
9470 	encoded->compression = ret;
9471 	if (encoded->compression) {
9472 		size_t inline_size;
9473 
9474 		inline_size = btrfs_file_extent_inline_item_len(leaf,
9475 								path->slots[0]);
9476 		if (inline_size > count)
9477 			return -ENOBUFS;
9478 
9479 		count = inline_size;
9480 		encoded->unencoded_len = ram_bytes;
9481 		encoded->unencoded_offset = iocb->ki_pos - extent_start;
9482 	} else {
9483 		count = min_t(u64, count, encoded->len);
9484 		encoded->len = count;
9485 		encoded->unencoded_len = count;
9486 		ptr += iocb->ki_pos - extent_start;
9487 	}
9488 
9489 	tmp = kmalloc(count, GFP_NOFS);
9490 	if (!tmp)
9491 		return -ENOMEM;
9492 
9493 	read_extent_buffer(leaf, tmp, ptr, count);
9494 	btrfs_release_path(path);
9495 	btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9496 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9497 	*unlocked = true;
9498 
9499 	ret = copy_to_iter(tmp, count, iter);
9500 	if (ret != count)
9501 		ret = -EFAULT;
9502 	kfree(tmp);
9503 
9504 	return ret;
9505 }
9506 
9507 struct btrfs_encoded_read_private {
9508 	struct completion *sync_reads;
9509 	void *uring_ctx;
9510 	refcount_t pending_refs;
9511 	blk_status_t status;
9512 };
9513 
btrfs_encoded_read_endio(struct btrfs_bio * bbio)9514 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
9515 {
9516 	struct btrfs_encoded_read_private *priv = bbio->private;
9517 
9518 	if (bbio->bio.bi_status) {
9519 		/*
9520 		 * The memory barrier implied by the refcount_dec_and_test() here
9521 		 * pairs with the memory barrier implied by the refcount_dec_and_test()
9522 		 * in btrfs_encoded_read_regular_fill_pages() to ensure that
9523 		 * this write is observed before the load of status in
9524 		 * btrfs_encoded_read_regular_fill_pages().
9525 		 */
9526 		WRITE_ONCE(priv->status, bbio->bio.bi_status);
9527 	}
9528 	if (refcount_dec_and_test(&priv->pending_refs)) {
9529 		int err = blk_status_to_errno(READ_ONCE(priv->status));
9530 
9531 		if (priv->uring_ctx) {
9532 			btrfs_uring_read_extent_endio(priv->uring_ctx, err);
9533 			kfree(priv);
9534 		} else {
9535 			complete(priv->sync_reads);
9536 		}
9537 	}
9538 	bio_put(&bbio->bio);
9539 }
9540 
btrfs_encoded_read_regular_fill_pages(struct btrfs_inode * inode,u64 disk_bytenr,u64 disk_io_size,struct page ** pages,void * uring_ctx)9541 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
9542 					  u64 disk_bytenr, u64 disk_io_size,
9543 					  struct page **pages, void *uring_ctx)
9544 {
9545 	struct btrfs_encoded_read_private *priv, sync_priv;
9546 	struct completion sync_reads;
9547 	unsigned long i = 0;
9548 	struct btrfs_bio *bbio;
9549 	int ret;
9550 
9551 	/*
9552 	 * Fast path for synchronous reads which completes in this call, io_uring
9553 	 * needs longer time span.
9554 	 */
9555 	if (uring_ctx) {
9556 		priv = kmalloc_obj(struct btrfs_encoded_read_private, GFP_NOFS);
9557 		if (!priv)
9558 			return -ENOMEM;
9559 	} else {
9560 		priv = &sync_priv;
9561 		init_completion(&sync_reads);
9562 		priv->sync_reads = &sync_reads;
9563 	}
9564 
9565 	refcount_set(&priv->pending_refs, 1);
9566 	priv->status = 0;
9567 	priv->uring_ctx = uring_ctx;
9568 
9569 	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
9570 			       btrfs_encoded_read_endio, priv);
9571 	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9572 
9573 	do {
9574 		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
9575 
9576 		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
9577 			refcount_inc(&priv->pending_refs);
9578 			btrfs_submit_bbio(bbio, 0);
9579 
9580 			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
9581 					       btrfs_encoded_read_endio, priv);
9582 			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9583 			continue;
9584 		}
9585 
9586 		i++;
9587 		disk_bytenr += bytes;
9588 		disk_io_size -= bytes;
9589 	} while (disk_io_size);
9590 
9591 	refcount_inc(&priv->pending_refs);
9592 	btrfs_submit_bbio(bbio, 0);
9593 
9594 	if (uring_ctx) {
9595 		if (refcount_dec_and_test(&priv->pending_refs)) {
9596 			ret = blk_status_to_errno(READ_ONCE(priv->status));
9597 			btrfs_uring_read_extent_endio(uring_ctx, ret);
9598 			kfree(priv);
9599 			return ret;
9600 		}
9601 
9602 		return -EIOCBQUEUED;
9603 	} else {
9604 		if (!refcount_dec_and_test(&priv->pending_refs))
9605 			wait_for_completion_io(&sync_reads);
9606 		/* See btrfs_encoded_read_endio() for ordering. */
9607 		return blk_status_to_errno(READ_ONCE(priv->status));
9608 	}
9609 }
9610 
btrfs_encoded_read_regular(struct kiocb * iocb,struct iov_iter * iter,u64 start,u64 lockend,struct extent_state ** cached_state,u64 disk_bytenr,u64 disk_io_size,size_t count,bool compressed,bool * unlocked)9611 ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
9612 				   u64 start, u64 lockend,
9613 				   struct extent_state **cached_state,
9614 				   u64 disk_bytenr, u64 disk_io_size,
9615 				   size_t count, bool compressed, bool *unlocked)
9616 {
9617 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9618 	struct extent_io_tree *io_tree = &inode->io_tree;
9619 	struct page **pages;
9620 	unsigned long nr_pages, i;
9621 	u64 cur;
9622 	size_t page_offset;
9623 	ssize_t ret;
9624 
9625 	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
9626 	pages = kzalloc_objs(struct page *, nr_pages, GFP_NOFS);
9627 	if (!pages)
9628 		return -ENOMEM;
9629 	ret = btrfs_alloc_page_array(nr_pages, pages, false);
9630 	if (ret) {
9631 		ret = -ENOMEM;
9632 		goto out;
9633 		}
9634 
9635 	ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
9636 						    disk_io_size, pages, NULL);
9637 	if (ret)
9638 		goto out;
9639 
9640 	btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9641 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9642 	*unlocked = true;
9643 
9644 	if (compressed) {
9645 		i = 0;
9646 		page_offset = 0;
9647 	} else {
9648 		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
9649 		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
9650 	}
9651 	cur = 0;
9652 	while (cur < count) {
9653 		size_t bytes = min_t(size_t, count - cur,
9654 				     PAGE_SIZE - page_offset);
9655 
9656 		if (copy_page_to_iter(pages[i], page_offset, bytes,
9657 				      iter) != bytes) {
9658 			ret = -EFAULT;
9659 			goto out;
9660 		}
9661 		i++;
9662 		cur += bytes;
9663 		page_offset = 0;
9664 	}
9665 	ret = count;
9666 out:
9667 	for (i = 0; i < nr_pages; i++) {
9668 		if (pages[i])
9669 			__free_page(pages[i]);
9670 	}
9671 	kfree(pages);
9672 	return ret;
9673 }
9674 
btrfs_encoded_read(struct kiocb * iocb,struct iov_iter * iter,struct btrfs_ioctl_encoded_io_args * encoded,struct extent_state ** cached_state,u64 * disk_bytenr,u64 * disk_io_size)9675 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
9676 			   struct btrfs_ioctl_encoded_io_args *encoded,
9677 			   struct extent_state **cached_state,
9678 			   u64 *disk_bytenr, u64 *disk_io_size)
9679 {
9680 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9681 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
9682 	struct extent_io_tree *io_tree = &inode->io_tree;
9683 	ssize_t ret;
9684 	size_t count = iov_iter_count(iter);
9685 	u64 start, lockend;
9686 	struct extent_map *em;
9687 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
9688 	bool unlocked = false;
9689 
9690 	file_accessed(iocb->ki_filp);
9691 
9692 	ret = btrfs_inode_lock(inode,
9693 			       BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
9694 	if (ret)
9695 		return ret;
9696 
9697 	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
9698 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9699 		return 0;
9700 	}
9701 	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
9702 	/*
9703 	 * We don't know how long the extent containing iocb->ki_pos is, but if
9704 	 * it's compressed we know that it won't be longer than this.
9705 	 */
9706 	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
9707 
9708 	if (nowait) {
9709 		struct btrfs_ordered_extent *ordered;
9710 
9711 		if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
9712 						  start, lockend)) {
9713 			ret = -EAGAIN;
9714 			goto out_unlock_inode;
9715 		}
9716 
9717 		if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
9718 			ret = -EAGAIN;
9719 			goto out_unlock_inode;
9720 		}
9721 
9722 		ordered = btrfs_lookup_ordered_range(inode, start,
9723 						     lockend - start + 1);
9724 		if (ordered) {
9725 			btrfs_put_ordered_extent(ordered);
9726 			btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9727 			ret = -EAGAIN;
9728 			goto out_unlock_inode;
9729 		}
9730 	} else {
9731 		for (;;) {
9732 			struct btrfs_ordered_extent *ordered;
9733 
9734 			ret = btrfs_wait_ordered_range(inode, start,
9735 						       lockend - start + 1);
9736 			if (ret)
9737 				goto out_unlock_inode;
9738 
9739 			btrfs_lock_extent(io_tree, start, lockend, cached_state);
9740 			ordered = btrfs_lookup_ordered_range(inode, start,
9741 							     lockend - start + 1);
9742 			if (!ordered)
9743 				break;
9744 			btrfs_put_ordered_extent(ordered);
9745 			btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9746 			cond_resched();
9747 		}
9748 	}
9749 
9750 	em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
9751 	if (IS_ERR(em)) {
9752 		ret = PTR_ERR(em);
9753 		goto out_unlock_extent;
9754 	}
9755 
9756 	if (em->disk_bytenr == EXTENT_MAP_INLINE) {
9757 		u64 extent_start = em->start;
9758 
9759 		/*
9760 		 * For inline extents we get everything we need out of the
9761 		 * extent item.
9762 		 */
9763 		btrfs_free_extent_map(em);
9764 		em = NULL;
9765 		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
9766 						cached_state, extent_start,
9767 						count, encoded, &unlocked);
9768 		goto out_unlock_extent;
9769 	}
9770 
9771 	/*
9772 	 * We only want to return up to EOF even if the extent extends beyond
9773 	 * that.
9774 	 */
9775 	encoded->len = min_t(u64, btrfs_extent_map_end(em),
9776 			     inode->vfs_inode.i_size) - iocb->ki_pos;
9777 	if (em->disk_bytenr == EXTENT_MAP_HOLE ||
9778 	    (em->flags & EXTENT_FLAG_PREALLOC)) {
9779 		*disk_bytenr = EXTENT_MAP_HOLE;
9780 		count = min_t(u64, count, encoded->len);
9781 		encoded->len = count;
9782 		encoded->unencoded_len = count;
9783 	} else if (btrfs_extent_map_is_compressed(em)) {
9784 		*disk_bytenr = em->disk_bytenr;
9785 		/*
9786 		 * Bail if the buffer isn't large enough to return the whole
9787 		 * compressed extent.
9788 		 */
9789 		if (em->disk_num_bytes > count) {
9790 			ret = -ENOBUFS;
9791 			goto out_em;
9792 		}
9793 		*disk_io_size = em->disk_num_bytes;
9794 		count = em->disk_num_bytes;
9795 		encoded->unencoded_len = em->ram_bytes;
9796 		encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
9797 		ret = btrfs_encoded_io_compression_from_extent(fs_info,
9798 					       btrfs_extent_map_compression(em));
9799 		if (ret < 0)
9800 			goto out_em;
9801 		encoded->compression = ret;
9802 	} else {
9803 		*disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start);
9804 		if (encoded->len > count)
9805 			encoded->len = count;
9806 		/*
9807 		 * Don't read beyond what we locked. This also limits the page
9808 		 * allocations that we'll do.
9809 		 */
9810 		*disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
9811 		count = start + *disk_io_size - iocb->ki_pos;
9812 		encoded->len = count;
9813 		encoded->unencoded_len = count;
9814 		*disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
9815 	}
9816 	btrfs_free_extent_map(em);
9817 	em = NULL;
9818 
9819 	if (*disk_bytenr == EXTENT_MAP_HOLE) {
9820 		btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9821 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9822 		unlocked = true;
9823 		ret = iov_iter_zero(count, iter);
9824 		if (ret != count)
9825 			ret = -EFAULT;
9826 	} else {
9827 		ret = -EIOCBQUEUED;
9828 		goto out_unlock_extent;
9829 	}
9830 
9831 out_em:
9832 	btrfs_free_extent_map(em);
9833 out_unlock_extent:
9834 	/* Leave inode and extent locked if we need to do a read. */
9835 	if (!unlocked && ret != -EIOCBQUEUED)
9836 		btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9837 out_unlock_inode:
9838 	if (!unlocked && ret != -EIOCBQUEUED)
9839 		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9840 	return ret;
9841 }
9842 
btrfs_do_encoded_write(struct kiocb * iocb,struct iov_iter * from,const struct btrfs_ioctl_encoded_io_args * encoded)9843 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
9844 			       const struct btrfs_ioctl_encoded_io_args *encoded)
9845 {
9846 	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9847 	struct btrfs_root *root = inode->root;
9848 	struct btrfs_fs_info *fs_info = root->fs_info;
9849 	struct extent_io_tree *io_tree = &inode->io_tree;
9850 	struct extent_changeset *data_reserved = NULL;
9851 	struct extent_state *cached_state = NULL;
9852 	struct btrfs_ordered_extent *ordered;
9853 	struct btrfs_file_extent file_extent;
9854 	struct compressed_bio *cb = NULL;
9855 	int compression;
9856 	size_t orig_count;
9857 	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
9858 	u64 start, end;
9859 	u64 num_bytes, ram_bytes, disk_num_bytes;
9860 	struct btrfs_key ins;
9861 	bool extent_reserved = false;
9862 	struct extent_map *em;
9863 	ssize_t ret;
9864 
9865 	switch (encoded->compression) {
9866 	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
9867 		compression = BTRFS_COMPRESS_ZLIB;
9868 		break;
9869 	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
9870 		compression = BTRFS_COMPRESS_ZSTD;
9871 		break;
9872 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
9873 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
9874 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
9875 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
9876 	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
9877 		/* The sector size must match for LZO. */
9878 		if (encoded->compression -
9879 		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
9880 		    fs_info->sectorsize_bits)
9881 			return -EINVAL;
9882 		compression = BTRFS_COMPRESS_LZO;
9883 		break;
9884 	default:
9885 		return -EINVAL;
9886 	}
9887 	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
9888 		return -EINVAL;
9889 
9890 	/*
9891 	 * Compressed extents should always have checksums, so error out if we
9892 	 * have a NOCOW file or inode was created while mounted with NODATASUM.
9893 	 */
9894 	if (inode->flags & BTRFS_INODE_NODATASUM)
9895 		return -EINVAL;
9896 
9897 	orig_count = iov_iter_count(from);
9898 
9899 	/* The extent size must be sane. */
9900 	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
9901 	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
9902 		return -EINVAL;
9903 
9904 	/*
9905 	 * The compressed data must be smaller than the decompressed data.
9906 	 *
9907 	 * It's of course possible for data to compress to larger or the same
9908 	 * size, but the buffered I/O path falls back to no compression for such
9909 	 * data, and we don't want to break any assumptions by creating these
9910 	 * extents.
9911 	 *
9912 	 * Note that this is less strict than the current check we have that the
9913 	 * compressed data must be at least one sector smaller than the
9914 	 * decompressed data. We only want to enforce the weaker requirement
9915 	 * from old kernels that it is at least one byte smaller.
9916 	 */
9917 	if (orig_count >= encoded->unencoded_len)
9918 		return -EINVAL;
9919 
9920 	/* The extent must start on a sector boundary. */
9921 	start = iocb->ki_pos;
9922 	if (!IS_ALIGNED(start, fs_info->sectorsize))
9923 		return -EINVAL;
9924 
9925 	/*
9926 	 * The extent must end on a sector boundary. However, we allow a write
9927 	 * which ends at or extends i_size to have an unaligned length; we round
9928 	 * up the extent size and set i_size to the unaligned end.
9929 	 */
9930 	if (start + encoded->len < inode->vfs_inode.i_size &&
9931 	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
9932 		return -EINVAL;
9933 
9934 	/* Finally, the offset in the unencoded data must be sector-aligned. */
9935 	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
9936 		return -EINVAL;
9937 
9938 	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
9939 	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
9940 	end = start + num_bytes - 1;
9941 
9942 	/*
9943 	 * If the extent cannot be inline, the compressed data on disk must be
9944 	 * sector-aligned. For convenience, we extend it with zeroes if it
9945 	 * isn't.
9946 	 */
9947 	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
9948 
9949 	cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
9950 	for (int i = 0; i * min_folio_size < disk_num_bytes; i++) {
9951 		struct folio *folio;
9952 		size_t bytes = min(min_folio_size, iov_iter_count(from));
9953 		char *kaddr;
9954 
9955 		folio = btrfs_alloc_compr_folio(fs_info);
9956 		if (!folio) {
9957 			ret = -ENOMEM;
9958 			goto out_cb;
9959 		}
9960 		kaddr = kmap_local_folio(folio, 0);
9961 		ret = copy_from_iter(kaddr, bytes, from);
9962 		kunmap_local(kaddr);
9963 		if (ret != bytes) {
9964 			folio_put(folio);
9965 			ret = -EFAULT;
9966 			goto out_cb;
9967 		}
9968 		if (bytes < min_folio_size)
9969 			folio_zero_range(folio, bytes, min_folio_size - bytes);
9970 		ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
9971 		if (unlikely(!ret)) {
9972 			folio_put(folio);
9973 			ret = -EINVAL;
9974 			goto out_cb;
9975 		}
9976 	}
9977 	ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
9978 
9979 	for (;;) {
9980 		ret = btrfs_wait_ordered_range(inode, start, num_bytes);
9981 		if (ret)
9982 			goto out_cb;
9983 		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
9984 						    start >> PAGE_SHIFT,
9985 						    end >> PAGE_SHIFT);
9986 		if (ret)
9987 			goto out_cb;
9988 		btrfs_lock_extent(io_tree, start, end, &cached_state);
9989 		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
9990 		if (!ordered &&
9991 		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
9992 			break;
9993 		if (ordered)
9994 			btrfs_put_ordered_extent(ordered);
9995 		btrfs_unlock_extent(io_tree, start, end, &cached_state);
9996 		cond_resched();
9997 	}
9998 
9999 	/*
10000 	 * We don't use the higher-level delalloc space functions because our
10001 	 * num_bytes and disk_num_bytes are different.
10002 	 */
10003 	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10004 	if (ret)
10005 		goto out_unlock;
10006 	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10007 	if (ret)
10008 		goto out_free_data_space;
10009 	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10010 					      false);
10011 	if (ret)
10012 		goto out_qgroup_free_data;
10013 
10014 	/* Try an inline extent first. */
10015 	if (encoded->unencoded_len == encoded->len &&
10016 	    encoded->unencoded_offset == 0 &&
10017 	    can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
10018 		ret = __cow_file_range_inline(inode, encoded->len,
10019 					      orig_count, compression,
10020 					      bio_first_folio_all(&cb->bbio.bio),
10021 					      true);
10022 		if (ret <= 0) {
10023 			if (ret == 0)
10024 				ret = orig_count;
10025 			goto out_delalloc_release;
10026 		}
10027 	}
10028 
10029 	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
10030 				   disk_num_bytes, 0, 0, &ins, true, true);
10031 	if (ret)
10032 		goto out_delalloc_release;
10033 	extent_reserved = true;
10034 
10035 	file_extent.disk_bytenr = ins.objectid;
10036 	file_extent.disk_num_bytes = ins.offset;
10037 	file_extent.num_bytes = num_bytes;
10038 	file_extent.ram_bytes = ram_bytes;
10039 	file_extent.offset = encoded->unencoded_offset;
10040 	file_extent.compression = compression;
10041 	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
10042 	if (IS_ERR(em)) {
10043 		ret = PTR_ERR(em);
10044 		goto out_free_reserved;
10045 	}
10046 	btrfs_free_extent_map(em);
10047 
10048 	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
10049 				       (1U << BTRFS_ORDERED_ENCODED) |
10050 				       (1U << BTRFS_ORDERED_COMPRESSED));
10051 	if (IS_ERR(ordered)) {
10052 		btrfs_drop_extent_map_range(inode, start, end, false);
10053 		ret = PTR_ERR(ordered);
10054 		goto out_free_reserved;
10055 	}
10056 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10057 
10058 	if (start + encoded->len > inode->vfs_inode.i_size)
10059 		i_size_write(&inode->vfs_inode, start + encoded->len);
10060 
10061 	btrfs_unlock_extent(io_tree, start, end, &cached_state);
10062 
10063 	btrfs_delalloc_release_extents(inode, num_bytes);
10064 
10065 	btrfs_submit_compressed_write(ordered, cb);
10066 	ret = orig_count;
10067 	goto out;
10068 
10069 out_free_reserved:
10070 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
10071 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
10072 out_delalloc_release:
10073 	btrfs_delalloc_release_extents(inode, num_bytes);
10074 	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
10075 out_qgroup_free_data:
10076 	if (ret < 0)
10077 		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
10078 out_free_data_space:
10079 	/*
10080 	 * If btrfs_reserve_extent() succeeded, then we already decremented
10081 	 * bytes_may_use.
10082 	 */
10083 	if (!extent_reserved)
10084 		btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
10085 out_unlock:
10086 	btrfs_unlock_extent(io_tree, start, end, &cached_state);
10087 out_cb:
10088 	if (cb)
10089 		cleanup_compressed_bio(cb);
10090 out:
10091 	if (ret >= 0)
10092 		iocb->ki_pos += encoded->len;
10093 	return ret;
10094 }
10095 
10096 #ifdef CONFIG_SWAP
10097 /*
10098  * Add an entry indicating a block group or device which is pinned by a
10099  * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10100  * negative errno on failure.
10101  */
btrfs_add_swapfile_pin(struct inode * inode,void * ptr,bool is_block_group)10102 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
10103 				  bool is_block_group)
10104 {
10105 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10106 	struct btrfs_swapfile_pin *sp, *entry;
10107 	struct rb_node **p;
10108 	struct rb_node *parent = NULL;
10109 
10110 	sp = kmalloc_obj(*sp, GFP_NOFS);
10111 	if (!sp)
10112 		return -ENOMEM;
10113 	sp->ptr = ptr;
10114 	sp->inode = inode;
10115 	sp->is_block_group = is_block_group;
10116 	sp->bg_extent_count = 1;
10117 
10118 	spin_lock(&fs_info->swapfile_pins_lock);
10119 	p = &fs_info->swapfile_pins.rb_node;
10120 	while (*p) {
10121 		parent = *p;
10122 		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
10123 		if (sp->ptr < entry->ptr ||
10124 		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
10125 			p = &(*p)->rb_left;
10126 		} else if (sp->ptr > entry->ptr ||
10127 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
10128 			p = &(*p)->rb_right;
10129 		} else {
10130 			if (is_block_group)
10131 				entry->bg_extent_count++;
10132 			spin_unlock(&fs_info->swapfile_pins_lock);
10133 			kfree(sp);
10134 			return 1;
10135 		}
10136 	}
10137 	rb_link_node(&sp->node, parent, p);
10138 	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
10139 	spin_unlock(&fs_info->swapfile_pins_lock);
10140 	return 0;
10141 }
10142 
10143 /* Free all of the entries pinned by this swapfile. */
btrfs_free_swapfile_pins(struct inode * inode)10144 static void btrfs_free_swapfile_pins(struct inode *inode)
10145 {
10146 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
10147 	struct btrfs_swapfile_pin *sp;
10148 	struct rb_node *node, *next;
10149 
10150 	spin_lock(&fs_info->swapfile_pins_lock);
10151 	node = rb_first(&fs_info->swapfile_pins);
10152 	while (node) {
10153 		next = rb_next(node);
10154 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
10155 		if (sp->inode == inode) {
10156 			rb_erase(&sp->node, &fs_info->swapfile_pins);
10157 			if (sp->is_block_group) {
10158 				btrfs_dec_block_group_swap_extents(sp->ptr,
10159 							   sp->bg_extent_count);
10160 				btrfs_put_block_group(sp->ptr);
10161 			}
10162 			kfree(sp);
10163 		}
10164 		node = next;
10165 	}
10166 	spin_unlock(&fs_info->swapfile_pins_lock);
10167 }
10168 
10169 struct btrfs_swap_info {
10170 	u64 start;
10171 	u64 block_start;
10172 	u64 block_len;
10173 	u64 lowest_ppage;
10174 	u64 highest_ppage;
10175 	unsigned long nr_pages;
10176 	int nr_extents;
10177 };
10178 
btrfs_add_swap_extent(struct swap_info_struct * sis,struct btrfs_swap_info * bsi)10179 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
10180 				 struct btrfs_swap_info *bsi)
10181 {
10182 	unsigned long nr_pages;
10183 	unsigned long max_pages;
10184 	u64 first_ppage, first_ppage_reported, next_ppage;
10185 	int ret;
10186 
10187 	/*
10188 	 * Our swapfile may have had its size extended after the swap header was
10189 	 * written. In that case activating the swapfile should not go beyond
10190 	 * the max size set in the swap header.
10191 	 */
10192 	if (bsi->nr_pages >= sis->max)
10193 		return 0;
10194 
10195 	max_pages = sis->max - bsi->nr_pages;
10196 	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
10197 	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
10198 
10199 	if (first_ppage >= next_ppage)
10200 		return 0;
10201 	nr_pages = next_ppage - first_ppage;
10202 	nr_pages = min(nr_pages, max_pages);
10203 
10204 	first_ppage_reported = first_ppage;
10205 	if (bsi->start == 0)
10206 		first_ppage_reported++;
10207 	if (bsi->lowest_ppage > first_ppage_reported)
10208 		bsi->lowest_ppage = first_ppage_reported;
10209 	if (bsi->highest_ppage < (next_ppage - 1))
10210 		bsi->highest_ppage = next_ppage - 1;
10211 
10212 	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
10213 	if (ret < 0)
10214 		return ret;
10215 	bsi->nr_extents += ret;
10216 	bsi->nr_pages += nr_pages;
10217 	return 0;
10218 }
10219 
btrfs_swap_deactivate(struct file * file)10220 static void btrfs_swap_deactivate(struct file *file)
10221 {
10222 	struct inode *inode = file_inode(file);
10223 
10224 	btrfs_free_swapfile_pins(inode);
10225 	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
10226 }
10227 
btrfs_swap_activate(struct swap_info_struct * sis,struct file * file,sector_t * span)10228 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10229 			       sector_t *span)
10230 {
10231 	struct inode *inode = file_inode(file);
10232 	struct btrfs_root *root = BTRFS_I(inode)->root;
10233 	struct btrfs_fs_info *fs_info = root->fs_info;
10234 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
10235 	struct extent_state *cached_state = NULL;
10236 	struct btrfs_chunk_map *map = NULL;
10237 	struct btrfs_device *device = NULL;
10238 	struct btrfs_swap_info bsi = {
10239 		.lowest_ppage = (sector_t)-1ULL,
10240 	};
10241 	struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
10242 	struct btrfs_path *path = NULL;
10243 	int ret = 0;
10244 	u64 isize;
10245 	u64 prev_extent_end = 0;
10246 
10247 	/*
10248 	 * Acquire the inode's mmap lock to prevent races with memory mapped
10249 	 * writes, as they could happen after we flush delalloc below and before
10250 	 * we lock the extent range further below. The inode was already locked
10251 	 * up in the call chain.
10252 	 */
10253 	btrfs_assert_inode_locked(BTRFS_I(inode));
10254 	down_write(&BTRFS_I(inode)->i_mmap_lock);
10255 
10256 	/*
10257 	 * If the swap file was just created, make sure delalloc is done. If the
10258 	 * file changes again after this, the user is doing something stupid and
10259 	 * we don't really care.
10260 	 */
10261 	ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
10262 	if (ret)
10263 		goto out_unlock_mmap;
10264 
10265 	/*
10266 	 * The inode is locked, so these flags won't change after we check them.
10267 	 */
10268 	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
10269 		btrfs_warn(fs_info, "swapfile must not be compressed");
10270 		ret = -EINVAL;
10271 		goto out_unlock_mmap;
10272 	}
10273 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
10274 		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
10275 		ret = -EINVAL;
10276 		goto out_unlock_mmap;
10277 	}
10278 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
10279 		btrfs_warn(fs_info, "swapfile must not be checksummed");
10280 		ret = -EINVAL;
10281 		goto out_unlock_mmap;
10282 	}
10283 
10284 	path = btrfs_alloc_path();
10285 	backref_ctx = btrfs_alloc_backref_share_check_ctx();
10286 	if (!path || !backref_ctx) {
10287 		ret = -ENOMEM;
10288 		goto out_unlock_mmap;
10289 	}
10290 
10291 	/*
10292 	 * Balance or device remove/replace/resize can move stuff around from
10293 	 * under us. The exclop protection makes sure they aren't running/won't
10294 	 * run concurrently while we are mapping the swap extents, and
10295 	 * fs_info->swapfile_pins prevents them from running while the swap
10296 	 * file is active and moving the extents. Note that this also prevents
10297 	 * a concurrent device add which isn't actually necessary, but it's not
10298 	 * really worth the trouble to allow it.
10299 	 */
10300 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
10301 		btrfs_warn(fs_info,
10302 	   "cannot activate swapfile while exclusive operation is running");
10303 		ret = -EBUSY;
10304 		goto out_unlock_mmap;
10305 	}
10306 
10307 	/*
10308 	 * Prevent snapshot creation while we are activating the swap file.
10309 	 * We do not want to race with snapshot creation. If snapshot creation
10310 	 * already started before we bumped nr_swapfiles from 0 to 1 and
10311 	 * completes before the first write into the swap file after it is
10312 	 * activated, than that write would fallback to COW.
10313 	 */
10314 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
10315 		btrfs_exclop_finish(fs_info);
10316 		btrfs_warn(fs_info,
10317 	   "cannot activate swapfile because snapshot creation is in progress");
10318 		ret = -EINVAL;
10319 		goto out_unlock_mmap;
10320 	}
10321 	/*
10322 	 * Snapshots can create extents which require COW even if NODATACOW is
10323 	 * set. We use this counter to prevent snapshots. We must increment it
10324 	 * before walking the extents because we don't want a concurrent
10325 	 * snapshot to run after we've already checked the extents.
10326 	 *
10327 	 * It is possible that subvolume is marked for deletion but still not
10328 	 * removed yet. To prevent this race, we check the root status before
10329 	 * activating the swapfile.
10330 	 */
10331 	spin_lock(&root->root_item_lock);
10332 	if (btrfs_root_dead(root)) {
10333 		spin_unlock(&root->root_item_lock);
10334 
10335 		btrfs_drew_write_unlock(&root->snapshot_lock);
10336 		btrfs_exclop_finish(fs_info);
10337 		btrfs_warn(fs_info,
10338 		"cannot activate swapfile because subvolume %llu is being deleted",
10339 			btrfs_root_id(root));
10340 		ret = -EPERM;
10341 		goto out_unlock_mmap;
10342 	}
10343 	atomic_inc(&root->nr_swapfiles);
10344 	spin_unlock(&root->root_item_lock);
10345 
10346 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
10347 
10348 	btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
10349 	while (prev_extent_end < isize) {
10350 		struct btrfs_key key;
10351 		struct extent_buffer *leaf;
10352 		struct btrfs_file_extent_item *ei;
10353 		struct btrfs_block_group *bg;
10354 		u64 logical_block_start;
10355 		u64 physical_block_start;
10356 		u64 extent_gen;
10357 		u64 disk_bytenr;
10358 		u64 len;
10359 
10360 		key.objectid = btrfs_ino(BTRFS_I(inode));
10361 		key.type = BTRFS_EXTENT_DATA_KEY;
10362 		key.offset = prev_extent_end;
10363 
10364 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
10365 		if (ret < 0)
10366 			goto out;
10367 
10368 		/*
10369 		 * If key not found it means we have an implicit hole (NO_HOLES
10370 		 * is enabled).
10371 		 */
10372 		if (ret > 0) {
10373 			btrfs_warn(fs_info, "swapfile must not have holes");
10374 			ret = -EINVAL;
10375 			goto out;
10376 		}
10377 
10378 		leaf = path->nodes[0];
10379 		ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
10380 
10381 		if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
10382 			/*
10383 			 * It's unlikely we'll ever actually find ourselves
10384 			 * here, as a file small enough to fit inline won't be
10385 			 * big enough to store more than the swap header, but in
10386 			 * case something changes in the future, let's catch it
10387 			 * here rather than later.
10388 			 */
10389 			btrfs_warn(fs_info, "swapfile must not be inline");
10390 			ret = -EINVAL;
10391 			goto out;
10392 		}
10393 
10394 		if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
10395 			btrfs_warn(fs_info, "swapfile must not be compressed");
10396 			ret = -EINVAL;
10397 			goto out;
10398 		}
10399 
10400 		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
10401 		if (disk_bytenr == 0) {
10402 			btrfs_warn(fs_info, "swapfile must not have holes");
10403 			ret = -EINVAL;
10404 			goto out;
10405 		}
10406 
10407 		logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
10408 		extent_gen = btrfs_file_extent_generation(leaf, ei);
10409 		prev_extent_end = btrfs_file_extent_end(path);
10410 
10411 		if (prev_extent_end > isize)
10412 			len = isize - key.offset;
10413 		else
10414 			len = btrfs_file_extent_num_bytes(leaf, ei);
10415 
10416 		backref_ctx->curr_leaf_bytenr = leaf->start;
10417 
10418 		/*
10419 		 * Don't need the path anymore, release to avoid deadlocks when
10420 		 * calling btrfs_is_data_extent_shared() because when joining a
10421 		 * transaction it can block waiting for the current one's commit
10422 		 * which in turn may be trying to lock the same leaf to flush
10423 		 * delayed items for example.
10424 		 */
10425 		btrfs_release_path(path);
10426 
10427 		ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
10428 						  extent_gen, backref_ctx);
10429 		if (ret < 0) {
10430 			goto out;
10431 		} else if (ret > 0) {
10432 			btrfs_warn(fs_info,
10433 				   "swapfile must not be copy-on-write");
10434 			ret = -EINVAL;
10435 			goto out;
10436 		}
10437 
10438 		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10439 		if (IS_ERR(map)) {
10440 			ret = PTR_ERR(map);
10441 			goto out;
10442 		}
10443 
10444 		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10445 			btrfs_warn(fs_info,
10446 				   "swapfile must have single data profile");
10447 			ret = -EINVAL;
10448 			goto out;
10449 		}
10450 
10451 		if (device == NULL) {
10452 			device = map->stripes[0].dev;
10453 			ret = btrfs_add_swapfile_pin(inode, device, false);
10454 			if (ret == 1)
10455 				ret = 0;
10456 			else if (ret)
10457 				goto out;
10458 		} else if (device != map->stripes[0].dev) {
10459 			btrfs_warn(fs_info, "swapfile must be on one device");
10460 			ret = -EINVAL;
10461 			goto out;
10462 		}
10463 
10464 		physical_block_start = (map->stripes[0].physical +
10465 					(logical_block_start - map->start));
10466 		btrfs_free_chunk_map(map);
10467 		map = NULL;
10468 
10469 		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10470 		if (!bg) {
10471 			btrfs_warn(fs_info,
10472 			   "could not find block group containing swapfile");
10473 			ret = -EINVAL;
10474 			goto out;
10475 		}
10476 
10477 		if (!btrfs_inc_block_group_swap_extents(bg)) {
10478 			btrfs_warn(fs_info,
10479 			   "block group for swapfile at %llu is read-only%s",
10480 			   bg->start,
10481 			   atomic_read(&fs_info->scrubs_running) ?
10482 				       " (scrub running)" : "");
10483 			btrfs_put_block_group(bg);
10484 			ret = -EINVAL;
10485 			goto out;
10486 		}
10487 
10488 		ret = btrfs_add_swapfile_pin(inode, bg, true);
10489 		if (ret) {
10490 			btrfs_put_block_group(bg);
10491 			if (ret == 1)
10492 				ret = 0;
10493 			else
10494 				goto out;
10495 		}
10496 
10497 		if (bsi.block_len &&
10498 		    bsi.block_start + bsi.block_len == physical_block_start) {
10499 			bsi.block_len += len;
10500 		} else {
10501 			if (bsi.block_len) {
10502 				ret = btrfs_add_swap_extent(sis, &bsi);
10503 				if (ret)
10504 					goto out;
10505 			}
10506 			bsi.start = key.offset;
10507 			bsi.block_start = physical_block_start;
10508 			bsi.block_len = len;
10509 		}
10510 
10511 		if (fatal_signal_pending(current)) {
10512 			ret = -EINTR;
10513 			goto out;
10514 		}
10515 
10516 		cond_resched();
10517 	}
10518 
10519 	if (bsi.block_len)
10520 		ret = btrfs_add_swap_extent(sis, &bsi);
10521 
10522 out:
10523 	if (!IS_ERR_OR_NULL(map))
10524 		btrfs_free_chunk_map(map);
10525 
10526 	btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);
10527 
10528 	if (ret)
10529 		btrfs_swap_deactivate(file);
10530 
10531 	btrfs_drew_write_unlock(&root->snapshot_lock);
10532 
10533 	btrfs_exclop_finish(fs_info);
10534 
10535 out_unlock_mmap:
10536 	up_write(&BTRFS_I(inode)->i_mmap_lock);
10537 	btrfs_free_backref_share_ctx(backref_ctx);
10538 	btrfs_free_path(path);
10539 	if (ret)
10540 		return ret;
10541 
10542 	if (device)
10543 		sis->bdev = device->bdev;
10544 	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
10545 	sis->max = bsi.nr_pages;
10546 	sis->pages = bsi.nr_pages - 1;
10547 	return bsi.nr_extents;
10548 }
10549 #else
btrfs_swap_deactivate(struct file * file)10550 static void btrfs_swap_deactivate(struct file *file)
10551 {
10552 }
10553 
btrfs_swap_activate(struct swap_info_struct * sis,struct file * file,sector_t * span)10554 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10555 			       sector_t *span)
10556 {
10557 	return -EOPNOTSUPP;
10558 }
10559 #endif
10560 
10561 /*
10562  * Update the number of bytes used in the VFS' inode. When we replace extents in
10563  * a range (clone, dedupe, fallocate's zero range), we must update the number of
10564  * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
10565  * always get a correct value.
10566  */
btrfs_update_inode_bytes(struct btrfs_inode * inode,const u64 add_bytes,const u64 del_bytes)10567 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
10568 			      const u64 add_bytes,
10569 			      const u64 del_bytes)
10570 {
10571 	if (add_bytes == del_bytes)
10572 		return;
10573 
10574 	spin_lock(&inode->lock);
10575 	if (del_bytes > 0)
10576 		inode_sub_bytes(&inode->vfs_inode, del_bytes);
10577 	if (add_bytes > 0)
10578 		inode_add_bytes(&inode->vfs_inode, add_bytes);
10579 	spin_unlock(&inode->lock);
10580 }
10581 
10582 /*
10583  * Verify that there are no ordered extents for a given file range.
10584  *
10585  * @inode:   The target inode.
10586  * @start:   Start offset of the file range, should be sector size aligned.
10587  * @end:     End offset (inclusive) of the file range, its value +1 should be
10588  *           sector size aligned.
10589  *
10590  * This should typically be used for cases where we locked an inode's VFS lock in
10591  * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
10592  * we have flushed all delalloc in the range, we have waited for all ordered
10593  * extents in the range to complete and finally we have locked the file range in
10594  * the inode's io_tree.
10595  */
btrfs_assert_inode_range_clean(struct btrfs_inode * inode,u64 start,u64 end)10596 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
10597 {
10598 	struct btrfs_root *root = inode->root;
10599 	struct btrfs_ordered_extent *ordered;
10600 
10601 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
10602 		return;
10603 
10604 	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
10605 	if (ordered) {
10606 		btrfs_err(root->fs_info,
10607 "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
10608 			  start, end, btrfs_ino(inode), btrfs_root_id(root),
10609 			  ordered->file_offset,
10610 			  ordered->file_offset + ordered->num_bytes - 1);
10611 		btrfs_put_ordered_extent(ordered);
10612 	}
10613 
10614 	ASSERT(ordered == NULL);
10615 }
10616 
10617 /*
10618  * Find the first inode with a minimum number.
10619  *
10620  * @root:	The root to search for.
10621  * @min_ino:	The minimum inode number.
10622  *
10623  * Find the first inode in the @root with a number >= @min_ino and return it.
10624  * Returns NULL if no such inode found.
10625  */
btrfs_find_first_inode(struct btrfs_root * root,u64 min_ino)10626 struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
10627 {
10628 	struct btrfs_inode *inode;
10629 	unsigned long from = min_ino;
10630 
10631 	xa_lock(&root->inodes);
10632 	while (true) {
10633 		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
10634 		if (!inode)
10635 			break;
10636 		if (igrab(&inode->vfs_inode))
10637 			break;
10638 
10639 		from = btrfs_ino(inode) + 1;
10640 		cond_resched_lock(&root->inodes.xa_lock);
10641 	}
10642 	xa_unlock(&root->inodes);
10643 
10644 	return inode;
10645 }
10646 
10647 static const struct inode_operations btrfs_dir_inode_operations = {
10648 	.getattr	= btrfs_getattr,
10649 	.lookup		= btrfs_lookup,
10650 	.create		= btrfs_create,
10651 	.unlink		= btrfs_unlink,
10652 	.link		= btrfs_link,
10653 	.mkdir		= btrfs_mkdir,
10654 	.rmdir		= btrfs_rmdir,
10655 	.rename		= btrfs_rename2,
10656 	.symlink	= btrfs_symlink,
10657 	.setattr	= btrfs_setattr,
10658 	.mknod		= btrfs_mknod,
10659 	.listxattr	= btrfs_listxattr,
10660 	.permission	= btrfs_permission,
10661 	.get_inode_acl	= btrfs_get_acl,
10662 	.set_acl	= btrfs_set_acl,
10663 	.update_time	= btrfs_update_time,
10664 	.tmpfile        = btrfs_tmpfile,
10665 	.fileattr_get	= btrfs_fileattr_get,
10666 	.fileattr_set	= btrfs_fileattr_set,
10667 };
10668 
10669 static const struct file_operations btrfs_dir_file_operations = {
10670 	.llseek		= btrfs_dir_llseek,
10671 	.read		= generic_read_dir,
10672 	.iterate_shared	= btrfs_real_readdir,
10673 	.open		= btrfs_opendir,
10674 	.unlocked_ioctl	= btrfs_ioctl,
10675 #ifdef CONFIG_COMPAT
10676 	.compat_ioctl	= btrfs_compat_ioctl,
10677 #endif
10678 	.release        = btrfs_release_file,
10679 	.fsync		= btrfs_sync_file,
10680 	.setlease	= generic_setlease,
10681 };
10682 
10683 /*
10684  * btrfs doesn't support the bmap operation because swapfiles
10685  * use bmap to make a mapping of extents in the file.  They assume
10686  * these extents won't change over the life of the file and they
10687  * use the bmap result to do IO directly to the drive.
10688  *
10689  * the btrfs bmap call would return logical addresses that aren't
10690  * suitable for IO and they also will change frequently as COW
10691  * operations happen.  So, swapfile + btrfs == corruption.
10692  *
10693  * For now we're avoiding this by dropping bmap.
10694  */
10695 static const struct address_space_operations btrfs_aops = {
10696 	.read_folio	= btrfs_read_folio,
10697 	.writepages	= btrfs_writepages,
10698 	.readahead	= btrfs_readahead,
10699 	.invalidate_folio = btrfs_invalidate_folio,
10700 	.launder_folio	= btrfs_launder_folio,
10701 	.release_folio	= btrfs_release_folio,
10702 	.migrate_folio	= btrfs_migrate_folio,
10703 	.dirty_folio	= filemap_dirty_folio,
10704 	.error_remove_folio = generic_error_remove_folio,
10705 	.swap_activate	= btrfs_swap_activate,
10706 	.swap_deactivate = btrfs_swap_deactivate,
10707 };
10708 
10709 static const struct inode_operations btrfs_file_inode_operations = {
10710 	.getattr	= btrfs_getattr,
10711 	.setattr	= btrfs_setattr,
10712 	.listxattr      = btrfs_listxattr,
10713 	.permission	= btrfs_permission,
10714 	.fiemap		= btrfs_fiemap,
10715 	.get_inode_acl	= btrfs_get_acl,
10716 	.set_acl	= btrfs_set_acl,
10717 	.update_time	= btrfs_update_time,
10718 	.fileattr_get	= btrfs_fileattr_get,
10719 	.fileattr_set	= btrfs_fileattr_set,
10720 };
10721 static const struct inode_operations btrfs_special_inode_operations = {
10722 	.getattr	= btrfs_getattr,
10723 	.setattr	= btrfs_setattr,
10724 	.permission	= btrfs_permission,
10725 	.listxattr	= btrfs_listxattr,
10726 	.get_inode_acl	= btrfs_get_acl,
10727 	.set_acl	= btrfs_set_acl,
10728 	.update_time	= btrfs_update_time,
10729 };
10730 static const struct inode_operations btrfs_symlink_inode_operations = {
10731 	.get_link	= page_get_link,
10732 	.getattr	= btrfs_getattr,
10733 	.setattr	= btrfs_setattr,
10734 	.permission	= btrfs_permission,
10735 	.listxattr	= btrfs_listxattr,
10736 	.update_time	= btrfs_update_time,
10737 };
10738 
10739 const struct dentry_operations btrfs_dentry_operations = {
10740 	.d_delete	= btrfs_dentry_delete,
10741 };
10742