// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/filelock.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <linux/unaligned.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"
#include "fiemap.h"
#include "delayed-inode.h"

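/*
 * Flags for cow_file_range(). This summary is derived from how the flags
 * are used in this file, not from an upstream comment:
 *
 * COW_FILE_RANGE_KEEP_LOCKED - on success, keep all folios locked instead
 *                              of unlocking everything except @locked_folio.
 * COW_FILE_RANGE_NO_INLINE   - do not attempt to create an inline extent.
 */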
#define COW_FILE_RANGE_KEEP_LOCKED	(1UL << 0)
#define COW_FILE_RANGE_NO_INLINE	(1UL << 1)

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

/*
 * For the file_extent_tree, we want to hold the inode lock when we look up
 * and update the disk_i_size, but lockdep will complain because, with our
 * io_tree, we hold the tree lock and then take the inode lock when setting
 * delalloc. These two things are unrelated, so make a class for the
 * file_extent_tree so we don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct folio *locked_folio, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);

static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * without the filename.
		 */
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0) {
		btrfs_put_root(local_root);
		goto err;
	}

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			   warn->logical, warn->mirror_num, root, inum, offset,
			   fs_info->sectorsize, nlink,
			   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	return 0;

err:
	btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		   warn->logical, warn->mirror_num, root, inum, offset, ret);

	return ret;
}

/*
 * Do extra user-friendly error output (e.g. look up all the affected files).
 *
 * If the backref lookup fails, fall back to the old error message format
 * without path resolution.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	BTRFS_PATH_AUTO_RELEASE(path);
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
		      btrfs_root_id(inode->root),
		      btrfs_ino(inode), file_off, logical,
		      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
		      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
		      mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				      logical, mirror_num,
				      (ref_level ? "node" : "leaf"),
				      ref_level, ref_root);
		}
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		/*
		 * Do not hold the path as the later iterate_extent_inodes()
		 * call can be time consuming.
		 */
		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}

static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (btrfs_is_data_reloc_root(root))
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(root), btrfs_ino(inode),
			      logical_start,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(root), btrfs_ino(inode),
			      logical_start,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	}
}

/*
 * Lock inode i_rwsem based on arguments passed.
 *
 * ilock_flags can have the following bit set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
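
/*
 * Illustrative usage (not part of the original file): a caller on a
 * non-blocking path would pair the two helpers above like this:
 *
 *	if (btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY))
 *		return -EAGAIN;
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 *
 * The same flag bits must be passed to both calls so that the unlock
 * matches the lock that was actually taken.
 */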

/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it cannot call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 u64 offset, u64 bytes)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	struct folio *folio;

	while (index <= end_index) {
		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
		if (IS_ERR(folio)) {
			index++;
			continue;
		}

		index = folio_next_index(folio);
		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
						offset, bytes);
		folio_put(folio);
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}

static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int ret;

	if (args->default_acl) {
		ret = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (ret)
			return ret;
	}
	if (args->acl) {
		ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (ret)
			return ret;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into
 * the btree. The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct folio *compressed_folio,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	const u32 sectorsize = trans->fs_info->sectorsize;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	/*
	 * The decompressed size must still be no larger than a sector. Under
	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
	 * big deal and we can continue the insertion.
	 */
	ASSERT(size <= sectorsize);

	/*
	 * The compressed size also needs to be no larger than a page.
	 * That's also why we only need one folio as the parameter.
	 */
	if (compressed_folio) {
		ASSERT(compressed_size <= sectorsize);
		ASSERT(compressed_size <= PAGE_SIZE);
	} else {
		ASSERT(compressed_size == 0);
	}

	if (compressed_size && compressed_folio)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = 0;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		kaddr = kmap_local_folio(compressed_folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
		kunmap_local(kaddr);

		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		struct folio *folio;

		folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
		ASSERT(!IS_ERR(folio));
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_folio(folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		folio_put(folio);
	}
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		return ret;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

	return 0;
}

static bool can_cow_file_range_inline(struct btrfs_inode *inode,
				      u64 offset, u64 size,
				      size_t compressed_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 data_len = (compressed_size ?: size);

	/* Inline extents must start at offset 0. */
	if (offset != 0)
		return false;

	/*
	 * Even for bs > ps cases, cow_file_range_inline() can only accept a
	 * single folio.
	 *
	 * This can be problematic and cause access beyond the page boundary
	 * if a page sized folio is passed into that function, and encoded
	 * write is doing exactly that.
	 * So here we limit the inline extent size to PAGE_SIZE.
	 */
	if (size > PAGE_SIZE || compressed_size > PAGE_SIZE)
		return false;

	/* Inline extents are limited to sectorsize. */
	if (size > fs_info->sectorsize)
		return false;

	/* We do not allow a non-compressed extent to be as large as block size. */
	if (data_len >= fs_info->sectorsize)
		return false;

	/* We cannot exceed the maximum inline data size. */
	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return false;

	/* We cannot exceed the user specified max_inline size. */
	if (data_len > fs_info->max_inline)
		return false;

	/* Inline extents must be the entirety of the file. */
	if (size < i_size_read(&inode->vfs_inode))
		return false;

	/* Encrypted file cannot be inlined. */
	if (IS_ENCRYPTED(&inode->vfs_inode))
		return false;

	return true;
}
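
/*
 * Worked example (illustrative, not from the original source, assuming a
 * 4K sectorsize and the default max_inline of 2048 bytes): a 2047-byte
 * file written at offset 0 passes all of the checks above, while a full
 * 4096-byte block does not, because an uncompressed inline extent must be
 * strictly smaller than the block size (and no larger than max_inline).
 */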

/*
 * Conditionally insert an inline extent into the file. This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 *
 * If being used directly, you must have already checked we're allowed to cow
 * the range by getting true from can_cow_file_range_inline().
 */
static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
					    u64 size, size_t compressed_size,
					    int compress_type,
					    struct folio *compressed_folio,
					    bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_folio, update_i_size);
	if (unlikely(ret && ret != -ENOSPC)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, inode);
	if (unlikely(ret && ret != -ENOSPC)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space: an inline extent doesn't
	 * count as a data extent, so free the reservation directly here.
	 * At reserve time the amount is always aligned to the sector size,
	 * so just free one sector here.
	 *
	 * If we fall back to non-inline (ret == 1) due to -ENOSPC, then we
	 * need to keep the data reservation.
	 */
	if (ret <= 0)
		btrfs_qgroup_free_data(inode, NULL, 0, fs_info->sectorsize, NULL);
	btrfs_free_path(path);
	if (trans)
		btrfs_end_transaction(trans);
	return ret;
}

static noinline int cow_file_range_inline(struct btrfs_inode *inode,
					  struct folio *locked_folio,
					  u64 offset, u64 end,
					  size_t compressed_size,
					  int compress_type,
					  struct folio *compressed_folio,
					  bool update_i_size)
{
	struct extent_state *cached = NULL;
	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
	int ret;

	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
		return 1;

	btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
	ret = __cow_file_range_inline(inode, size, compressed_size,
				      compress_type, compressed_folio,
				      update_i_size);
	if (ret > 0) {
		btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
		return ret;
	}

	/*
	 * In the successful case (ret == 0 here), cow_file_range will return 1.
	 *
	 * Quite a bit further up the callstack in extent_writepage(), ret == 1
	 * is treated as a short circuited success and does not unlock the folio,
	 * so we must do it here.
	 *
	 * In the failure case, the locked_folio does get unlocked by
	 * btrfs_folio_end_all_writers, which asserts that it is still locked
	 * at that point, so we must *not* unlock it here.
	 *
	 * The other two callsites in compress_file_range do not have a
	 * locked_folio, so they are not relevant to this logic.
	 */
	if (ret == 0)
		locked_folio = NULL;

	extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
				     clear_flags, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	struct compressed_bio *cb;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct folio *locked_folio;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

static int add_async_extent(struct async_chunk *cow, u64 start, u64 ram_size,
			    struct compressed_bio *cb)
{
	struct async_extent *async_extent;

	async_extent = kmalloc_obj(*async_extent, GFP_NOFS);
	if (!async_extent)
		return -ENOMEM;
	ASSERT(ram_size < U32_MAX);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->cb = cb;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode));
		return 0;
	}

	/*
	 * If the delalloc range is only one fs block and cannot be inlined,
	 * do not even bother trying compression, as there will be no space
	 * saving and we will always fall back to a regular write later.
	 */
	if (start != 0 && end + 1 - start <= fs_info->sectorsize)
		return 0;
	/* Defrag ioctl takes precedence over mount options and properties. */
	if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
		return 0;
	if (BTRFS_COMPRESS_NONE < inode->defrag_compress &&
	    inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES)
		return 1;
	/* Force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* Bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(inode, start, end);
	return 0;
}
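
/*
 * To summarize the precedence implemented above (derived from the checks,
 * not from an upstream comment): the defrag ioctl's compression setting
 * wins over everything else, then -o compress-force, then the per-inode
 * NOCOMPRESS flag (set after bad compression ratios), and finally the
 * mount option / inode flag / compression property combined with the
 * heuristic.
 */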

static inline void inode_should_defrag(struct btrfs_inode *inode,
				       u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(inode, small_write);
}

static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end)
{
	const pgoff_t end_index = end >> PAGE_SHIFT;
	struct folio *folio;
	int ret = 0;

	for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) {
		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
		if (IS_ERR(folio)) {
			if (!ret)
				ret = PTR_ERR(folio);
			continue;
		}
		btrfs_folio_clamp_clear_dirty(inode->root->fs_info, folio, start,
					      end + 1 - start);
		folio_put(folio);
	}
	return ret;
}

static struct folio *compressed_bio_last_folio(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	struct bio_vec *bvec;
	phys_addr_t paddr;

	/*
	 * Make sure all folios have the same min_folio_size.
	 *
	 * Otherwise we cannot simply use offset_in_folio(folio, bi_size) to
	 * calculate the end of the last folio.
	 */
	if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
		struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
		const u32 min_folio_size = btrfs_min_folio_size(fs_info);
		struct folio_iter fi;

		bio_for_each_folio_all(fi, bio)
			ASSERT(folio_size(fi.folio) == min_folio_size);
	}

	/* The bio must not be empty. */
	ASSERT(bio->bi_vcnt);

	bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
	paddr = page_to_phys(bvec->bv_page) + bvec->bv_offset + bvec->bv_len - 1;
	return page_folio(phys_to_page(paddr));
}

static void zero_last_folio(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	struct folio *last_folio = compressed_bio_last_folio(cb);
	const u32 bio_size = bio->bi_iter.bi_size;
	const u32 foffset = offset_in_folio(last_folio, bio_size);

	folio_zero_range(last_folio, foffset, folio_size(last_folio) - foffset);
}

static void round_up_last_block(struct compressed_bio *cb, u32 blocksize)
{
	struct bio *bio = &cb->bbio.bio;
	struct folio *last_folio = compressed_bio_last_folio(cb);
	const u32 bio_size = bio->bi_iter.bi_size;
	const u32 foffset = offset_in_folio(last_folio, bio_size);
	bool ret;

	if (IS_ALIGNED(bio_size, blocksize))
		return;

	ret = bio_add_folio(bio, last_folio, round_up(foffset, blocksize) - foffset, foffset);
	/* The remaining part should be merged into the last bvec, thus this should never fail. */
	ASSERT(ret);
}
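
/*
 * Worked example (illustrative numbers, not from the original source): with
 * 4K folios and a 4K blocksize, a compressed bio of 6000 bytes ends 1904
 * bytes into its last folio. round_up_last_block() then adds
 * round_up(1904, 4096) - 1904 = 2192 bytes, so the bio covers exactly two
 * full blocks on disk.
 */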

/*
 * Work queue callback to start compression on a file and pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus. The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes. This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct compressed_bio *cb = NULL;
	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	u32 cur_len;
	int ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int loff;
	int compress_type = fs_info->compress_type;
	int compress_level = fs_info->compress_level;

	if (btrfs_is_shutdown(fs_info))
		goto cleanup_and_bail_uncompressed;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	ret = extent_range_clear_dirty_for_io(inode, start, end);

	/*
	 * All the folios should have been locked thus no failure.
	 *
	 * And even if some folios are missing, btrfs_compress_bio()
	 * would handle them correctly, so here just do an ASSERT() check for
	 * early logic errors.
	 */
	ASSERT(ret == 0);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	total_in = 0;
	cur_len = min(end + 1 - start, BTRFS_MAX_UNCOMPRESSED);
	ret = 0;
	cb = NULL;

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS. This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
		compress_type = inode->defrag_compress;
		compress_level = inode->defrag_compress_level;
	} else if (inode->prop_compress) {
		compress_type = inode->prop_compress;
	}

	/* Compression level is applied here. */
	cb = btrfs_compress_bio(inode, start, cur_len, compress_type,
				compress_level, async_chunk->write_flags);
	if (IS_ERR(cb)) {
		cb = NULL;
		goto mark_incompressible;
	}

	total_compressed = cb->bbio.bio.bi_iter.bi_size;
	total_in = cur_len;

	/*
	 * Zero the tail end of the last folio, as we might be sending it down
	 * to disk.
	 */
	loff = (total_compressed & (min_folio_size - 1));
	if (loff)
		zero_last_folio(cb);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (total_in < actual_end)
		ret = cow_file_range_inline(inode, NULL, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
	else
		ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
					    compress_type,
					    bio_first_folio_all(&cb->bbio.bio), false);
	if (ret <= 0) {
		cleanup_compressed_bio(cb);
		if (ret < 0)
			mapping_set_error(mapping, -EIO);
		return;
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);
	round_up_last_block(cb, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;
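
	/*
	 * Illustrative numbers (not from the original source): with 4K
	 * blocks, a 64K chunk must compress to at most 60K (after the
	 * ALIGN() above) to be kept compressed, since only then does
	 * total_compressed + blocksize stay <= total_in and at least one
	 * full block is saved.
	 */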

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	ret = add_async_extent(async_chunk, start, total_in, cb);
	BUG_ON(ret);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	ret = add_async_extent(async_chunk, start, end - start + 1, NULL);
	BUG_ON(ret);
	if (cb)
		cleanup_compressed_bio(cb);
}

static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct folio *locked_folio)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_folio, start, end,
			       &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		if (locked_folio)
			btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
					     start, async_extent->ram_size);
		btrfs_err_rl(inode->root->fs_info,
			     "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
			     __func__, btrfs_root_id(inode->root),
			     btrfs_ino(inode), start, async_extent->ram_size, ret);
	}
}

static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	struct btrfs_key ins;
	struct folio *locked_folio = NULL;
	struct extent_state *cached = NULL;
	struct extent_map *em;
	int ret = 0;
	u32 compressed_size;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_folio is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_folio) {
		u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
		u64 locked_folio_end = locked_folio_start +
			folio_size(async_chunk->locked_folio) - 1;

		if (!(start >= locked_folio_end || end <= locked_folio_start))
			locked_folio = async_chunk->locked_folio;
	}

	if (!async_extent->cb) {
		submit_uncompressed_range(inode, async_extent, locked_folio);
		goto done;
	}

	compressed_size = async_extent->cb->bbio.bio.bi_iter.bi_size;
	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   compressed_size, compressed_size,
				   0, *alloc_hint, &ins, true, true);
	if (ret) {
		/*
		 * We can't reserve contiguous space for the compressed size.
		 * Unlikely, but it's possible that we could have enough
		 * non-contiguous space for the uncompressed size instead. So
		 * fall back to uncompressed.
		 */
		submit_uncompressed_range(inode, async_extent, locked_folio);
		cleanup_compressed_bio(async_extent->cb);
		async_extent->cb = NULL;
		goto done;
	}

	btrfs_lock_extent(io_tree, start, end, &cached);

	/* Here we're doing allocation and writeback of the compressed pages */
	file_extent.disk_bytenr = ins.objectid;
	file_extent.disk_num_bytes = ins.offset;
	file_extent.ram_bytes = async_extent->ram_size;
	file_extent.num_bytes = async_extent->ram_size;
	file_extent.offset = 0;
	file_extent.compression = async_extent->cb->compress_type;

	async_extent->cb->bbio.bio.bi_iter.bi_sector = ins.objectid >> SECTOR_SHIFT;

	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	btrfs_free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
					     1U << BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	async_extent->cb->bbio.ordered = ordered;
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_bbio(&async_extent->cb->bbio, 0);
	async_extent->cb = NULL;

	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	if (async_extent->cb)
		cleanup_compressed_bio(async_extent->cb);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    btrfs_root_id(root), btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				     u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = btrfs_search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * If block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint. If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
			btrfs_free_extent_map(em);
			em = btrfs_search_extent_mapping(em_tree, 0, 0);
			if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
				alloc_hint = btrfs_extent_map_block_start(em);
			if (em)
				btrfs_free_extent_map(em);
		} else {
			alloc_hint = btrfs_extent_map_block_start(em);
			btrfs_free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
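
/*
 * Illustrative note (not from the original source): if the extent map
 * covering @start points at disk bytenr X, the hint returned above is X,
 * nudging the allocator to place the new extent near the file's existing
 * data; a returned hint of 0 means "no preference".
 */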

/*
 * Handle COW for one range.
 *
 * @ins:		The key representing the allocated range.
 * @file_offset:	The file offset of the COW range.
 * @num_bytes:		The expected length of the COW range.
 *			The actually allocated length can be smaller than it.
 * @min_alloc_size:	The minimal extent size.
 * @alloc_hint:		The hint for the extent allocator.
 * @ret_alloc_size:	The length of the COW range handled by this function.
 *
 * Return 0 if everything is fine, with @ret_alloc_size updated. The range is
 * still locked, and the caller should unlock it after everything is done or
 * for error handling.
 *
 * Return <0 for error, with @ret_alloc_size updated to indicate where the
 * extra cleanup should happen. The range
 * [file_offset, file_offset + ret_alloc_size) will be cleaned up by this
 * function.
 */
static int cow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
			 struct btrfs_key *ins, struct extent_state **cached,
			 u64 file_offset, u32 num_bytes, u32 min_alloc_size,
			 u64 alloc_hint, u32 *ret_alloc_size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	struct extent_map *em;
	u32 cur_len = 0;
	u64 cur_end;
	int ret;

	ret = btrfs_reserve_extent(root, num_bytes, num_bytes, min_alloc_size,
				   0, alloc_hint, ins, true, true);
	if (ret < 0) {
		*ret_alloc_size = cur_len;
		return ret;
	}

	cur_len = ins->offset;
	cur_end = file_offset + cur_len - 1;

	file_extent.disk_bytenr = ins->objectid;
	file_extent.disk_num_bytes = ins->offset;
	file_extent.num_bytes = ins->offset;
	file_extent.ram_bytes = ins->offset;
	file_extent.offset = 0;
	file_extent.compression = BTRFS_COMPRESS_NONE;

	/*
	 * Locked range will be released either during error clean up (inside
	 * this function or by the caller for previously successful ranges) or
	 * after the whole range is finished.
	 */
	btrfs_lock_extent(&inode->io_tree, file_offset, cur_end, cached);
	em = btrfs_create_io_em(inode, file_offset, &file_extent, BTRFS_ORDERED_REGULAR);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto free_reserved;
	}
	btrfs_free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, file_offset, &file_extent,
					     1U << BTRFS_ORDERED_REGULAR);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, file_offset, cur_end, false);
		ret = PTR_ERR(ordered);
		goto free_reserved;
	}

	if (btrfs_is_data_reloc_root(root)) {
		ret = btrfs_reloc_clone_csums(ordered);

		/*
		 * Only drop the cache here, and process as normal.
		 *
		 * We must not allow extent_clear_unlock_delalloc() at the
		 * free_reserved label to free the meta of this ordered extent,
		 * as its meta should be freed by btrfs_finish_ordered_io().
		 *
		 * So we must continue and let the caller advance past the
		 * current ordered extent.
		 */
		if (ret)
			btrfs_drop_extent_map_range(inode, file_offset,
						    cur_end, false);
	}
	btrfs_put_ordered_extent(ordered);
	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
	/*
	 * Error handling for btrfs_reloc_clone_csums().
	 *
	 * Treat the range as finished, thus only clear EXTENT_LOCKED | EXTENT_DELALLOC.
	 * The accounting will be done by the ordered extents.
	 */
	if (unlikely(ret < 0)) {
		btrfs_cleanup_ordered_extents(inode, file_offset, cur_len);
		extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     PAGE_UNLOCK | PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	}
	*ret_alloc_size = cur_len;
	return ret;

free_reserved:
	/*
	 * If we have reserved an extent for the current range and failed to
	 * create the respective extent map or ordered extent, it means that
	 * when we reserved the extent we decremented the extent's size from
	 * the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount.
	 *
	 * We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter, which
	 * will be handled by btrfs_free_reserved_extent().
	 *
	 * Therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV, but only
	 * EXTENT_CLEAR_META_RESV.
	 */
	extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	btrfs_qgroup_free_data(inode, NULL, file_offset, cur_len, NULL);
	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
	btrfs_free_reserved_extent(fs_info, ins->objectid, ins->offset, true);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	*ret_alloc_size = cur_len;
	/*
	 * We should not return -EAGAIN here, as it's a special return code
	 * reserved for zoned filesystems to catch from btrfs_reserve_extent().
	 */
	ASSERT(ret != -EAGAIN);
	return ret;
}

/*
 * When extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code. The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_folio is the folio that writepage had locked already. We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all folios except @locked_folio.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all folios including locked_folio and starts I/O on them.
 * (In reality inline extents are limited to a single block, so locked_folio is
 * the only folio handled anyway).
 *
 * When this function succeeds and creates a normal extent, the folio locking
 * status depends on the passed in flags:
 *
 * - If COW_FILE_RANGE_KEEP_LOCKED flag is set, all folios are kept locked.
 * - Else all folios except for @locked_folio are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are cleaned up.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct folio *locked_folio, u64 start,
				   u64 end, u64 *done_offset,
				   unsigned long flags)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_state *cached = NULL;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	u32 min_alloc_size;
	u32 blocksize = fs_info->sectorsize;
	u32 cur_alloc_size = 0;
	struct btrfs_key ins;
	unsigned clear_bits;
	unsigned long page_ops;
	int ret = 0;

	if (btrfs_is_shutdown(fs_info)) {
		ret = -EIO;
		goto out_unlock;
	}

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
		/* Let's try to make an inline extent. */
		ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret <= 0) {
			/*
			 * We succeeded, return 1 so the caller knows we're done
			 * with this page and already handled the IO.
			 *
			 * If there was an error then cow_file_range_inline() has
			 * already done the cleanup.
			 */
			if (ret == 0)
				ret = 1;
			goto done;
		}
	}

	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * We're not doing compressed IO, don't unlock the first page (which
	 * the caller expects to stay locked), don't clear any dirty bits and
	 * don't set any writeback bits.
	 *
	 * Do set the Ordered (Private2) bit so we know this page was properly
	 * setup for writepage.
	 */
	page_ops = ((flags & COW_FILE_RANGE_KEEP_LOCKED) ? 0 : PAGE_UNLOCK);
	page_ops |= PAGE_SET_ORDERED;

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		ret = cow_one_range(inode, locked_folio, &ins, &cached, start,
				    num_bytes, min_alloc_size, alloc_hint, &cur_alloc_size);

		if (ret == -EAGAIN) {
			/*
			 * cow_one_range() only returns -EAGAIN for zoned
			 * file systems (from btrfs_reserve_extent()), which
			 * is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation. Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				/*
				 * Move @end to the end of the processed range,
				 * and exit the loop to unlock the processed extents.
				 */
				end = start - 1;
				ret = 0;
				break;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;

		/* We should not allocate an extent larger than requested. */
		ASSERT(cur_alloc_size <= num_bytes);

		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		cur_alloc_size = 0;
	}
	extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region, thus we need to cleanup those ordered extents.
	 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
	 * are also handled by the ordered extents cleanup.
	 *
	 * So here we only clear EXTENT_LOCKED and EXTENT_DELALLOC flag, and
	 * finish the writeback of the involved folios, which will never be
	 * submitted.
	 */
	if (orig_start < start) {
		clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
		page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

		if (!locked_folio)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);

		btrfs_cleanup_ordered_extents(inode, orig_start, start - orig_start);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_folio, NULL, clear_bits, page_ops);
	}

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (2) the error handling is done by cow_one_range() itself.
	 * Nothing needs to be done.
	 *
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start + cur_alloc_size < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
					     end, locked_folio,
					     &cached, clear_bits, page_ops);
		btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
				       end - start - cur_alloc_size + 1, NULL);
	}
	btrfs_err(fs_info,
"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu cur_alloc_size=%u: %d",
		  __func__, btrfs_root_id(inode->root),
		  btrfs_ino(inode), orig_start, end + 1 - orig_start,
		  start, cur_alloc_size, ret);
	return ret;
}

/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 *
 * If called with @do_free == true then it'll try to finish the work and free
 * the work struct eventually.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	if (do_free) {
		struct async_cow *async_cow;

		btrfs_add_delayed_iput(async_chunk->inode);
		if (async_chunk->blkcg_css)
			css_put(async_chunk->blkcg_css);

		async_cow = async_chunk->async_cow;
		if (atomic_dec_and_test(&async_cow->num_chunks))
			kvfree(async_cow);
		return;
	}

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_first_entry(&async_chunk->extents,
						struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}

static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct folio *locked_folio, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc_flex(*ctx, chunks, num_chunks);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

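	/*
	 * The delalloc range is split into 512K chunks, each compressed and
	 * submitted by its own work item. E.g. a range of 1M + 256K is
	 * handled as three chunks: [start, start + 512K - 1],
	 * [start + 512K, start + 1M - 1] and [start + 1M, end].
	 */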
	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime.
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_folio comes all the way from writepage and it's
		 * the original folio we were actually given. As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_folio.
		 *
		 * This way we don't need racy decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_folio) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async. We want all of them to
			 * be accounted against wbc once. Let's do it here
			 * before the paths diverge. wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy. Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_folio,
						 cur_end - start);
			async_chunk[i].locked_folio = locked_folio;
			locked_folio = NULL;
		} else {
			async_chunk[i].locked_folio = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, compress_file_range,
				submit_compressed_extents);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		start = cur_end + 1;
	}
	return true;
}

/*
 * Run the delalloc range from start to end, and write back any dirty pages
 * covered by the range.
 */
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct folio *locked_folio, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty)
{
	u64 done_offset = end;
	int ret;

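	/*
	 * On a zoned filesystem cow_file_range() may make partial progress:
	 * e.g. for start == 0 and end == 1M - 1 it may only be able to
	 * allocate up to done_offset == 512K - 1 before running out of active
	 * zones. We then write back what was allocated and continue from
	 * 512K, repeating until the whole range is processed.
	 */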
	while (start <= end) {
		ret = cow_file_range(inode, locked_folio, start, end,
				     &done_offset, COW_FILE_RANGE_KEEP_LOCKED);
		if (ret)
			return ret;
		extent_write_locked_range(&inode->vfs_inode, locked_folio,
					  start, done_offset, wbc, pages_dirty);
		start = done_offset + 1;
	}

	return 1;
}

static int fallback_to_cow(struct btrfs_inode *inode,
			   struct folio *locked_folio, const u64 start,
			   const u64 end)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 range_start = start;
	u64 count;
	int ret;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we did not have enough available data space and therefore we
	 * did not reserve data space for it, since we thought we could do a
	 * NOCOW write for the respective file range (either there is a
	 * prealloc extent or the inode has the NOCOW bit set).
	 *
	 * However when we need to fall back to COW mode (because for example
	 * the block group for the corresponding extent was turned to RO mode
	 * by a scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()). So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fall back to COW and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason. Space caches and relocated data extents always get a
	 * prealloc extent for them, however scrub or balance may have set the
	 * block group that contains that extent to RO mode and therefore
	 * force COW when starting writeback.
	 */
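	/*
	 * E.g. falling back to COW on a 1M range of a regular inode where
	 * 256K are marked EXTENT_NORESERVE: count below is 256K, so we add
	 * 256K back to bytes_may_use. For a free space cache or relocation
	 * inode the full 1M (range_bytes) is added instead, since no data
	 * space was reserved for any of it.
	 */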
	btrfs_lock_extent(io_tree, start, end, &cached_state);
	count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes,
				       EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					       &cached_state);
	}
	btrfs_unlock_extent(io_tree, start, end, &cached_state);

	/*
	 * Don't try to create inline extents, as a mix of inline extent that
	 * is written out and unlocked directly and a normal NOCOW extent
	 * doesn't work.
	 *
	 * Also, do not unlock the folios here after a successful run. The
	 * folios will be unlocked after everything is finished, or by the
	 * error handling.
	 *
	 * This is to ensure the error handling won't need to clear the
	 * dirty/ordered flags without a locked folio, which could race with
	 * writeback.
	 */
	ret = cow_file_range(inode, locked_folio, start, end, NULL,
			     COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED);
	ASSERT(ret != 1);
	return ret;
}

struct can_nocow_file_extent_args {
	/* Input fields. */

	/* Start file offset of the range we want to NOCOW. */
	u64 start;
	/* End file offset (inclusive) of the range we want to NOCOW. */
	u64 end;
	bool writeback_path;
	/*
	 * Free the path passed to can_nocow_file_extent() once it's not needed
	 * anymore.
	 */
	bool free_path;

	/*
	 * Output fields. Only set when can_nocow_file_extent() returns 1.
	 * The expected file extent for the NOCOW write.
	 */
	struct btrfs_file_extent file_extent;
};

/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
static int can_nocow_file_extent(struct btrfs_path *path,
				 struct btrfs_key *key,
				 struct btrfs_inode *inode,
				 struct can_nocow_file_extent_args *args)
{
	const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_root *csum_root;
	u64 io_start;
	u64 extent_end;
	u8 extent_type;
	int can_nocow = 0;
	int ret = 0;
	bool nowait = path->nowait;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);

	if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		goto out;

	if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
	    extent_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	/*
	 * If the extent was created before the generation where the last snapshot
	 * for its subvolume was created, then this implies the extent is shared,
	 * hence we must COW.
	 */
	if (btrfs_file_extent_generation(leaf, fi) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	/* An explicit hole, must COW. */
	if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
		goto out;

	/* Compressed/encrypted/encoded extents must be COWed. */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	extent_end = btrfs_file_extent_end(path);

	args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
	args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);

	/*
	 * The following checks can be expensive, as they need to take other
	 * locks and do btree or rbtree searches, so release the path to avoid
	 * blocking other tasks for too long.
	 */
	btrfs_release_path(path);

	ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
				    args->file_extent.disk_bytenr, path);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	if (args->free_path) {
		/*
		 * We don't need the path anymore, plus through the
		 * btrfs_lookup_csums_list() call below we will end up allocating
		 * another path. So free the path to avoid unnecessary extra
		 * memory usage.
		 */
		btrfs_free_path(path);
		path = NULL;
	}

	/* If there are pending snapshots for this root, we must COW. */
	if (args->writeback_path && !is_freespace_inode &&
	    atomic_read(&root->snapshot_force_cow))
		goto out;

	args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
	args->file_extent.offset += args->start - key->offset;
	io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
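
	/*
	 * E.g. for a file extent item at file offset key->offset == 1M with
	 * extent offset 0 and a NOCOW range starting at args->start == 1M + 64K,
	 * the adjusted extent offset becomes 64K and io_start points 64K into
	 * the on-disk extent.
	 */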

	/*
	 * Force COW if csums exist in the range. This ensures that csums for a
	 * given extent are either valid or do not exist.
	 */
	csum_root = btrfs_csum_root(root->fs_info, io_start);
	ret = btrfs_lookup_csums_list(csum_root, io_start,
				      io_start + args->file_extent.num_bytes - 1,
				      NULL, nowait);
	WARN_ON_ONCE(ret > 0 && is_freespace_inode);
	if (ret != 0)
		goto out;

	can_nocow = 1;
out:
	if (args->free_path && path)
		btrfs_free_path(path);

	return ret < 0 ? ret : can_nocow;
}

static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
			   struct extent_state **cached,
			   struct can_nocow_file_extent_args *nocow_args,
			   u64 file_pos, bool is_prealloc)
{
	struct btrfs_ordered_extent *ordered;
	const u64 len = nocow_args->file_extent.num_bytes;
	const u64 end = file_pos + len - 1;
	int ret = 0;

	btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);

	if (is_prealloc) {
		struct extent_map *em;

		em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
					BTRFS_ORDERED_PREALLOC);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto error;
		}
		btrfs_free_extent_map(em);
	}

	ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
					     is_prealloc
					     ? (1U << BTRFS_ORDERED_PREALLOC)
					     : (1U << BTRFS_ORDERED_NOCOW));
	if (IS_ERR(ordered)) {
		if (is_prealloc)
			btrfs_drop_extent_map_range(inode, file_pos, end, false);
		ret = PTR_ERR(ordered);
		goto error;
	}

	if (btrfs_is_data_reloc_root(inode->root))
		/*
		 * Errors are handled later, as we must prevent
		 * extent_clear_unlock_delalloc() in the error handler from
		 * freeing metadata of the created ordered extent.
		 */
		ret = btrfs_reloc_clone_csums(ordered);
	btrfs_put_ordered_extent(ordered);

	if (ret < 0)
		goto error;
	extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_CLEAR_DATA_RESV,
				     PAGE_SET_ORDERED);
	return ret;

error:
	btrfs_cleanup_ordered_extents(inode, file_pos, len);
	extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_CLEAR_DATA_RESV,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	btrfs_err(inode->root->fs_info,
		  "%s failed, root=%lld inode=%llu start=%llu len=%llu: %d",
		  __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
		  file_pos, len, ret);
	return ret;
}

/*
 * Called for NOCOW writeback. This checks for snapshots or COW copies of the
 * extents that exist in the file, and COWs the file as required.
 *
 * If no COW copies or snapshots exist, we write directly to the existing
 * blocks on disk.
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct folio *locked_folio,
				       const u64 start, const u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path = NULL;
	u64 cow_start = (u64)-1;
	/*
	 * If not 0, represents the inclusive end of the last fallback_to_cow()
	 * range. Only for error handling.
	 *
	 * The same applies to nocow_end, which avoids cleaning up again a
	 * range already cleaned up by nocow_one_range().
	 */
	u64 cow_end = 0;
	u64 nocow_end = 0;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	u64 ino = btrfs_ino(inode);
	struct can_nocow_file_extent_args nocow_args = { 0 };
	/* The range that has ordered extent(s). */
	u64 oe_cleanup_start;
	u64 oe_cleanup_len = 0;
	/* The range that is untouched. */
	u64 untouched_start;
	u64 untouched_len = 0;

	/*
	 * Normally on a zoned device we're only doing COW writes, but
	 * relocation on a zoned filesystem serializes I/O so that we're only
	 * writing sequentially and can end up here as well.
	 */
	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));

	if (btrfs_is_shutdown(fs_info)) {
		ret = -EIO;
		goto error;
	}
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	nocow_args.end = end;
	nocow_args.writeback_path = true;

	while (cur_offset <= end) {
		struct btrfs_block_group *nocow_bg = NULL;
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		struct extent_state *cached_state = NULL;
		u64 extent_end;
		int extent_type;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset.
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one. */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO. */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode.
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range. */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust cur_offset to be right before this extent begins.
		 */
		if (found_key.offset > cur_offset) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = found_key.offset;
			goto next_slot;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersects it.
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		/* If this is triggered then we have a memory corruption. */
		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
			ret = -EUCLEAN;
			goto error;
		}
		extent_end = btrfs_file_extent_end(path);

		/*
		 * If the extent we got ends before our current offset, skip to
		 * the next extent.
		 */
		if (extent_end <= cur_offset) {
			path->slots[0]++;
			goto next_slot;
		}

		nocow_args.start = cur_offset;
		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
		if (ret < 0)
			goto error;
		if (ret == 0)
			goto must_cow;

		ret = 0;
		nocow_bg = btrfs_inc_nocow_writers(fs_info,
				nocow_args.file_extent.disk_bytenr +
				nocow_args.file_extent.offset);
		if (!nocow_bg) {
must_cow:
			/*
			 * If we can't perform NOCOW writeback for the range,
			 * then record the beginning of the range that needs to
			 * be COWed. It will be written out before the next
			 * NOCOW range if we find one, or when exiting this
			 * loop.
			 */
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			if (!path->nodes[0])
				continue;
			path->slots[0]++;
			goto next_slot;
		}

		/*
		 * COW the range from cow_start to found_key.offset - 1, as the
		 * key contains the beginning of the first extent that can be
		 * NOCOWed, following one which needs to be COWed.
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_folio, cow_start,
					      found_key.offset - 1);
			if (ret) {
				cow_end = found_key.offset - 1;
				btrfs_dec_nocow_writers(nocow_bg);
				goto error;
			}
			cow_start = (u64)-1;
		}

		ret = nocow_one_range(inode, locked_folio, &cached_state,
				      &nocow_args, cur_offset,
				      extent_type == BTRFS_FILE_EXTENT_PREALLOC);
		btrfs_dec_nocow_writers(nocow_bg);
		if (ret < 0) {
			nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
			goto error;
		}
		cur_offset = extent_end;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		ret = fallback_to_cow(inode, locked_folio, cow_start, end);
		if (ret) {
			cow_end = end;
			goto error;
		}
		cow_start = (u64)-1;
	}

	/*
	 * Everything is finished without an error, can unlock the folios now.
	 *
	 * No need to touch the io tree range nor set the folio ordered flag,
	 * as fallback_to_cow() and nocow_one_range() have already handled
	 * them.
	 */
	extent_clear_unlock_delalloc(inode, start, end, locked_folio, NULL, 0, PAGE_UNLOCK);

	btrfs_free_path(path);
	return 0;

error:
	if (cow_start == (u64)-1) {
		/*
		 * case a)
		 * start        cur_offset           end
		 * |  OE cleanup  |     Untouched      |
		 *
		 * We finished a fallback_to_cow() or nocow_one_range() call,
		 * but failed to check the next range.
		 *
		 * or
		 * start        cur_offset   nocow_end      end
		 * |  OE cleanup  |    Skip    |  Untouched   |
		 *
		 * nocow_one_range() failed, the range [cur_offset, nocow_end]
		 * is already cleaned up.
		 */
		oe_cleanup_start = start;
		oe_cleanup_len = cur_offset - start;
		if (nocow_end)
			untouched_start = nocow_end + 1;
		else
			untouched_start = cur_offset;
		untouched_len = end + 1 - untouched_start;
	} else if (cow_start != (u64)-1 && cow_end == 0) {
		/*
		 * case b)
		 * start      cow_start      cur_offset      end
		 * |  OE cleanup  |         Untouched          |
		 *
		 * We found a range that needs COW, but we had not yet hit the
		 * next NOCOW range, thus [cow_start, cur_offset) does not yet
		 * have any ordered extent.
		 */
		oe_cleanup_start = start;
		oe_cleanup_len = cow_start - start;
		untouched_start = cow_start;
		untouched_len = end + 1 - untouched_start;
	} else {
		/*
		 * case c)
		 * start      cow_start      cow_end        end
		 * |  OE cleanup  |    Skip     |  Untouched  |
		 *
		 * fallback_to_cow() failed, and fallback_to_cow() will do the
		 * cleanup for its range, so we shouldn't touch the range
		 * [cow_start, cow_end].
		 */
		ASSERT(cow_start != (u64)-1 && cow_end != 0);
		oe_cleanup_start = start;
		oe_cleanup_len = cow_start - start;
		untouched_start = cow_end + 1;
		untouched_len = end + 1 - untouched_start;
	}

	if (oe_cleanup_len) {
		const u64 oe_cleanup_end = oe_cleanup_start + oe_cleanup_len - 1;

		btrfs_cleanup_ordered_extents(inode, oe_cleanup_start, oe_cleanup_len);
		extent_clear_unlock_delalloc(inode, oe_cleanup_start, oe_cleanup_end,
					     locked_folio, NULL,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     PAGE_UNLOCK | PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
	}

	if (untouched_len) {
		struct extent_state *cached = NULL;
		const u64 untouched_end = untouched_start + untouched_len - 1;

		/*
		 * We need to lock the extent here because we're clearing
		 * DELALLOC and we're not locked at this point.
		 */
		btrfs_lock_extent(&inode->io_tree, untouched_start, untouched_end, &cached);
		extent_clear_unlock_delalloc(inode, untouched_start, untouched_end,
					     locked_folio, &cached,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		btrfs_qgroup_free_data(inode, NULL, untouched_start, untouched_len, NULL);
	}
	btrfs_free_path(path);
	btrfs_err(fs_info,
"%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu oe_cleanup=%llu oe_cleanup_len=%llu untouched_start=%llu untouched_len=%llu: %d",
		  __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
		  start, end + 1 - start, cur_offset, oe_cleanup_start, oe_cleanup_len,
		  untouched_start, untouched_len, ret);
	return ret;
}

static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
		if (inode->defrag_bytes &&
		    btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
			return false;
		return true;
	}
	return false;
}

/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
			     u64 start, u64 end, struct writeback_control *wbc)
{
	const bool zoned = btrfs_is_zoned(inode->root->fs_info);

	/*
	 * The range must cover part of the @locked_folio, or a return of 1
	 * can confuse the caller.
	 */
	ASSERT(!(end <= folio_pos(locked_folio) ||
		 start >= folio_next_pos(locked_folio)));

	if (should_nocow(inode, start, end))
		return run_delalloc_nocow(inode, locked_folio, start, end);

	if (btrfs_inode_can_compress(inode) &&
	    inode_need_compress(inode, start, end) &&
	    run_delalloc_compressed(inode, locked_folio, start, end, wbc))
		return 1;

	if (zoned)
		return run_delalloc_cow(inode, locked_folio, start, end, wbc, true);
	else
		return cow_file_range(inode, locked_folio, start, end, NULL, 0);
}

void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *orig, u64 split)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 size;

	lockdep_assert_held(&inode->io_tree.lock);

	/* Not delalloc, ignore it. */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > fs_info->max_extent_size) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_delalloc_extent, the same
		 * applies here, just in reverse.
		 */
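		/*
		 * E.g. with a 128M max_extent_size, splitting a 128M + 8K
		 * delalloc extent (2 outstanding extents) at 128M + 4K yields
		 * a 128M + 4K part (2 extents) and a 4K part (1 extent), so
		 * one extra outstanding extent must be added below.
		 */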
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(fs_info, new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(fs_info, new_size);
		if (count_max_extents(fs_info, size) >= num_extents)
			return;
	}

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);
}

/*
 * Handle merged delayed allocation extents so we can keep track of new extents
 * that are just merged onto old extents, such as when we are doing sequential
 * writes, so we can properly account for the metadata space we'll need.
 */
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
				 struct extent_state *other)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 new_size, old_size;
	u32 num_extents;

	lockdep_assert_held(&inode->io_tree.lock);

	/* Not delalloc, ignore it. */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* We're not bigger than the max, unreserve the space and go. */
	if (new_size <= fs_info->max_extent_size) {
		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -1);
		spin_unlock(&inode->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent. If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop. Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return. But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(fs_info, old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(fs_info, old_size);
	if (count_max_extents(fs_info, new_size) >= num_extents)
		return;

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, -1);
	spin_unlock(&inode->lock);
}

static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&root->delalloc_lock);
	ASSERT(list_empty(&inode->delalloc_inodes));
	list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
	root->nr_delalloc_inodes++;
	if (root->nr_delalloc_inodes == 1) {
		spin_lock(&fs_info->delalloc_root_lock);
		ASSERT(list_empty(&root->delalloc_root));
		list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&root->delalloc_lock);
}

void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	lockdep_assert_held(&root->delalloc_lock);

	/*
	 * We may be called after the inode was already deleted from the list,
	 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
	 * and then later through btrfs_clear_delalloc_extent() while the inode
	 * still has ->delalloc_bytes > 0.
	 */
	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			ASSERT(list_empty(&root->delalloc_inodes));
			spin_lock(&fs_info->delalloc_root_lock);
			ASSERT(!list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
}

/*
 * Properly track delayed allocation bytes in the inode and maintain the list
 * of inodes that have pending delalloc work to be done.
 */
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
			       u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->io_tree.lock);

	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * The set_bit and clear_bit hooks normally require _irqsave/restore,
	 * but in this case we are only testing for the DELALLOC bit, which is
	 * only set or cleared with irqs on.
	 */
	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		u64 len = state->end + 1 - state->start;
		u64 prev_delalloc_bytes;
		u32 num_extents = count_max_extents(fs_info, len);
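		/*
		 * E.g. with the default 128M max_extent_size, a 260M delalloc
		 * range is accounted as 3 outstanding extents
		 * (DIV_ROUND_UP(260M, 128M)), since writeback may split it
		 * into up to that many extents.
		 */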

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, num_extents);
		spin_unlock(&inode->lock);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		prev_delalloc_bytes = inode->delalloc_bytes;
		inode->delalloc_bytes += len;
		if (bits & EXTENT_DEFRAG)
			inode->defrag_bytes += len;
		spin_unlock(&inode->lock);

		/*
		 * We don't need to be under the protection of the inode's lock,
		 * because we are called while holding the inode's io_tree lock
		 * and are therefore protected against concurrent calls of this
		 * function and btrfs_clear_delalloc_extent().
		 */
		if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
			btrfs_add_delalloc_inode(inode);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		inode->new_delalloc_bytes += state->end + 1 - state->start;
		spin_unlock(&inode->lock);
	}
}

/*
 * Once a range is no longer delalloc this function ensures that proper
 * accounting happens.
 */
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *state, u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(fs_info, len);

	lockdep_assert_held(&inode->io_tree.lock);

	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * The set_bit and clear_bit hooks normally require _irqsave/restore,
	 * but in this case we are only testing for the DELALLOC bit, which is
	 * only set or cleared with irqs on.
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		u64 new_delalloc_bytes;

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len, true);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (!btrfs_is_data_reloc_root(root) &&
		    !btrfs_is_free_space_inode(inode) &&
		    !(state->state & EXTENT_NORESERVE) &&
		    (bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(inode, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		new_delalloc_bytes = inode->delalloc_bytes;
		spin_unlock(&inode->lock);

		/*
		 * We don't need to be under the protection of the inode's lock,
		 * because we are called while holding the inode's io_tree lock
		 * and are therefore protected against concurrent calls of this
		 * function and btrfs_set_delalloc_extent().
		 */
		if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
			spin_lock(&root->delalloc_lock);
			btrfs_del_delalloc_inode(inode);
			spin_unlock(&root->delalloc_lock);
		}
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		if (bits & EXTENT_ADD_INODE_BYTES)
			inode_add_bytes(&inode->vfs_inode, len);
		spin_unlock(&inode->lock);
	}
}

/*
 * Given a list of ordered sums, record them in the inode. This happens at IO
 * completion time based on sums calculated at bio submission time.
 */
static int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	struct btrfs_root *csum_root = NULL;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		if (!csum_root)
			csum_root = btrfs_csum_root(trans->fs_info,
						    sum->logical);
		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}

static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->disk_bytenr != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = btrfs_set_extent_bit(&inode->io_tree, search_start,
					   search_start + em_len - 1,
					   EXTENT_DELALLOC_NEW, cached_state);
next:
		search_start = btrfs_extent_map_end(em);
		btrfs_free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state)
{
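	/*
	 * @end is an inclusive file offset, so it must point at the last byte
	 * of a block (e.g. 4095 for a 4K page) and can never be page aligned.
	 */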
	WARN_ON(PAGE_ALIGNED(end));

	if (start >= i_size_read(&inode->vfs_inode) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
		/*
		 * There can't be any extents following eof in this case so just
		 * set the delalloc new bit for the range directly.
		 */
		extra_bits |= EXTENT_DELALLOC_NEW;
	} else {
		int ret;

		ret = btrfs_find_new_delalloc_bytes(inode, start,
						    end + 1 - start,
						    cached_state);
		if (ret)
			return ret;
	}

	return btrfs_set_extent_bit(&inode->io_tree, start, end,
				    EXTENT_DELALLOC | extra_bits, cached_state);
}

/* See btrfs_writepage_cow_fixup for details on why this is required. */
struct btrfs_writepage_fixup {
	struct folio *folio;
	struct btrfs_inode *inode;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup =
		container_of(work, struct btrfs_writepage_fixup, work);
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct folio *folio = fixup->folio;
	struct btrfs_inode *inode = fixup->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 page_start = folio_pos(folio);
	u64 page_end = folio_next_pos(folio) - 1;
	int ret = 0;
	bool free_delalloc_space = true;

	/*
	 * This is similar to page_mkwrite, we need to reserve the space before
	 * we take the folio lock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   folio_size(folio));
again:
	folio_lock(folio);

	/*
	 * Before we queued this fixup, we took a reference on the folio.
	 * folio->mapping may go NULL, but it shouldn't be moved to a different
	 * address space.
	 */
	if (!folio->mapping || !folio_test_dirty(folio) ||
	    !folio_test_checked(folio)) {
		/*
		 * Unfortunately this is a little tricky, either
		 *
		 * 1) We got here and our folio had already been dealt with and
		 *    we reserved our space, thus ret == 0, so we need to just
		 *    drop our space reservation and bail. This can happen the
		 *    first time we come into the fixup worker, or could happen
		 *    while waiting for the ordered extent.
		 * 2) Our folio was already dealt with, but we happened to get an
		 *    ENOSPC above from the btrfs_delalloc_reserve_space. In
		 *    this case we obviously don't have anything to release, but
		 *    because the folio was already dealt with we don't want to
		 *    mark the folio with an error, so make sure we're resetting
		 *    ret to 0. This is why we have this check _before_ the ret
		 *    check, because we do not want to have a surprise ENOSPC
		 *    when the folio was already properly dealt with.
		 */
		if (!ret) {
			btrfs_delalloc_release_extents(inode, folio_size(folio));
			btrfs_delalloc_release_space(inode, data_reserved,
						     page_start, folio_size(folio),
						     true);
		}
		ret = 0;
		goto out_page;
	}

	/*
	 * We can't mess with the folio state unless it is locked, so now that
	 * it is locked bail if we failed to make our space reservation.
	 */
	if (ret)
		goto out_page;

	btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);

	/* Already ordered? We're done. */
	if (folio_test_ordered(folio))
		goto out_reserved;

	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
	if (ordered) {
		btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
				    &cached_state);
		folio_unlock(folio);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state);
	if (ret)
		goto out_reserved;

	/*
	 * Everything went as planned, we're now the owner of a dirty page with
	 * delayed allocation bits set and space reserved for our COW
	 * destination.
	 *
	 * The page was dirty when we started, nothing should have cleaned it.
	 */
	BUG_ON(!folio_test_dirty(folio));
	free_delalloc_space = false;
out_reserved:
	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
	if (free_delalloc_space)
		btrfs_delalloc_release_space(inode, data_reserved, page_start,
					     PAGE_SIZE, true);
	btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
	if (ret) {
		/*
		 * We hit ENOSPC or other errors. Update the mapping and page
		 * to reflect the errors and clean the page.
		 */
		mapping_set_error(folio->mapping, ret);
		btrfs_mark_ordered_io_finished(inode, folio, page_start,
					       folio_size(folio), !ret);
		folio_clear_dirty_for_io(folio);
	}
	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
	folio_unlock(folio);
	folio_put(folio);
	kfree(fixup);
	extent_changeset_free(data_reserved);
	/*
	 * As a precaution, do a delayed iput in case it would be the last iput
	 * that could need flushing space. Recursing back to fixup worker would
	 * deadlock.
	 */
	btrfs_add_delayed_iput(inode);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the folio dirty bit without asking the filesystem if it is a
 * good idea. This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly set up for IO. We kick off an async process
 * to fix it up. The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the folio.
 */
int btrfs_writepage_cow_fixup(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct btrfs_writepage_fixup *fixup;

	/* This folio has an ordered extent covering it already. */
	if (folio_test_ordered(folio))
		return 0;

	/*
	 * For experimental builds, we error out instead of returning EAGAIN.
	 *
	 * We should not hit such out-of-band dirty folios anymore.
	 */
	if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
		DEBUG_WARN();
		btrfs_err_rl(fs_info,
	"root %lld ino %llu folio %llu is marked dirty without notifying the fs",
			     btrfs_root_id(BTRFS_I(inode)->root),
			     btrfs_ino(BTRFS_I(inode)),
			     folio_pos(folio));
		return -EUCLEAN;
	}

	/*
	 * folio_checked is set below when we create a fixup worker for this
	 * folio, don't try to create another one if we're already
	 * folio_test_checked.
	 *
	 * The extent_io writepage code will redirty the folio if we send back
	 * EAGAIN.
	 */
	if (folio_test_checked(folio))
		return -EAGAIN;

	fixup = kzalloc_obj(*fixup, GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/*
	 * We are already holding a reference to this inode from
	 * write_cache_pages. We need to hold it because the space reservation
	 * takes place outside of the folio lock, and we can't trust
	 * folio->mapping outside of the folio lock.
	 */
	ihold(inode);
	btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
	folio_get(folio);
	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
	fixup->folio = folio;
	fixup->inode = BTRFS_I(inode);
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);

	return -EAGAIN;
}

static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *inode, u64 file_pos,
				       struct btrfs_file_extent_item *stack_fi,
				       const bool update_inode_bytes,
				       u64 qgroup_reserved)
{
	struct btrfs_root *root = inode->root;
	const u64 sectorsize = root->fs_info->sectorsize;
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * We may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * The caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	drop_args.path = path;
	drop_args.start = file_pos;
	drop_args.end = file_pos + num_bytes;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*stack_fi);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret)
		return ret;

	if (!drop_args.extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.type = BTRFS_EXTENT_DATA_KEY;
		ins.offset = file_pos;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*stack_fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
	write_extent_buffer(leaf, stack_fi,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    sizeof(struct btrfs_file_extent_item));

	btrfs_release_path(path);

	/*
	 * If we dropped an inline extent here, we know the range where it is
	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
	 * number of bytes only for that range containing the inline extent.
	 * The remainder of the range will be processed when clearing the
	 * EXTENT_DELALLOC_NEW bit through the ordered extent completion.
	 */
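	/*
	 * E.g. if a 2000 byte inline extent was dropped (bytes_found == 2000)
	 * with a 4K sector size: we add one sector and subtract the 2000
	 * inline bytes here, and skip that first sector of num_bytes below,
	 * leaving the rest to be accounted at ordered extent completion.
	 */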
	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);

		inline_size = drop_args.bytes_found - inline_size;
		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
		drop_args.bytes_found -= inline_size;
		num_bytes -= sectorsize;
	}

	if (update_inode_bytes)
		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);

	ins.objectid = disk_bytenr;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ins.offset = disk_num_bytes;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		return ret;

	return btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
						file_pos - offset,
						qgroup_reserved, &ins);
}

static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}

static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	bool update_inode_bytes;
	u64 num_bytes = oe->num_bytes;
	u64 ram_bytes = oe->ram_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
		num_bytes = oe->truncated_len;
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0. */

	/*
	 * For delalloc, when completing an ordered extent we update the inode's
	 * bytes when clearing the range in the inode's io tree, so pass false
	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
	 * except if the ordered extent was truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, oe->inode,
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}

/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers is
 * fully written.
 */
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = ordered_extent->inode;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);
	if (!freespace_inode)
		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);

	if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) {
		ret = -EIO;
		goto out;
	}

	ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
				      ordered_extent->disk_num_bytes);
	if (ret)
		goto out;

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	/*
	 * If it's a COW write we need to lock the extent range as we will be
	 * inserting/replacing file extent items and unpinning an extent map.
	 * This must be taken before joining a transaction, as it's a higher
	 * level lock (like the inode's VFS lock), otherwise we can run into an
	 * ABBA deadlock with other tasks (transactions work like a lock,
	 * depending on their current state).
	 */
	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED;
		btrfs_lock_extent_bits(io_tree, start, end,
				       EXTENT_LOCKED | EXTENT_FINISHING_ORDERED,
				       &cached_state);
	}

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	ret = btrfs_insert_raid_extent(trans, ordered_extent);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		/* Logic error */
		ASSERT(list_empty(&ordered_extent->list));
		if (unlikely(!list_empty(&ordered_extent->list))) {
			ret = -EINVAL;
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		ret = btrfs_update_inode_fallback(trans, inode);
		if (unlikely(ret)) {
			/* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		}
		goto out;
	}

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
						  ordered_extent->disk_num_bytes);
		if (unlikely(ret < 0)) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (unlikely(ret < 0)) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		clear_reserved_extent = false;
		btrfs_release_delalloc_bytes(fs_info,
					     ordered_extent->disk_bytenr,
					     ordered_extent->disk_num_bytes);
	}

	ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset,
				       ordered_extent->num_bytes, trans->transid);
	if (unlikely(ret < 0)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		btrfs_clear_extent_bit(&inode->io_tree, start, end,
				       EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				       &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, inode);
	if (unlikely(ret)) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
out:
	btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			       &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set. Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the ones
		 * marking this ordered extent as failed.
		 */
		if (ret)
			btrfs_mark_ordered_extent_error(ordered_extent);

		/*
		 * Drop extent maps for the part of the extent we didn't write.
		 *
		 * We have an exception here for the free_space_inode, because
		 * when we do btrfs_get_extent() on the free space inode we
		 * will search the commit root. If this is a new block group
		 * we won't find anything, and we will trip over the assert in
		 * writepage where we do ASSERT(em->block_start !=
		 * EXTENT_MAP_HOLE).
		 *
		 * Theoretically we could also skip this for any NOCOW extent as
		 * we don't mess with the extent map tree in the NOCOW case, but
		 * for now simply skip this if we are the free space inode.
		 */
		if (!btrfs_is_free_space_inode(inode)) {
			u64 unwritten_start = start;

			if (truncated)
				unwritten_start += logical_len;

			btrfs_drop_extent_map_range(inode, unwritten_start,
						    end, false);
		}

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator. We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						     ordered_extent->disk_bytenr,
						     ordered_extent->disk_num_bytes,
						     NULL, true);
			btrfs_free_reserved_extent(fs_info,
						   ordered_extent->disk_bytenr,
						   ordered_extent->disk_num_bytes, true);
			/*
			 * Actually free the qgroup rsv which was released when
			 * the ordered extent was created.
			 */
			btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
						  ordered_extent->qgroup_rsv,
						  BTRFS_QGROUP_RSV_DATA);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}

int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
	if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
	    list_empty(&ordered->bioc_list))
		btrfs_finish_ordered_zoned(ordered);
	return btrfs_finish_one_ordered(ordered);
}

/*
 * Calculate the checksum of an fs block at physical memory address @paddr,
 * and save the result to @dest.
 *
 * The folio containing @paddr must be large enough to contain a full fs block.
 */
void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
				      const phys_addr_t paddr, u8 *dest)
{
	struct folio *folio = page_folio(phys_to_page(paddr));
	const u32 blocksize = fs_info->sectorsize;
	const u32 step = min(blocksize, PAGE_SIZE);
	const u32 nr_steps = blocksize / step;
	phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];

	/* The full block must be inside the folio. */
	ASSERT(offset_in_folio(folio, paddr) + blocksize <= folio_size(folio));

	for (int i = 0; i < nr_steps; i++) {
		u32 pindex = offset_in_folio(folio, paddr + i * step) >> PAGE_SHIFT;

		/*
		 * For bs <= ps cases, we will only run the loop once, so the
		 * offset inside the page is only added to paddrs[0].
		 *
		 * For bs > ps cases, the block must be page aligned, thus the
		 * offset inside the page will always be 0.
		 */
		paddrs[i] = page_to_phys(folio_page(folio, pindex)) + offset_in_page(paddr);
	}
	return btrfs_calculate_block_csum_pages(fs_info, paddrs, dest);
}
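
/*
 * A hedged sketch of the address math above, with made-up numbers and 4K
 * pages assumed:
 *
 *	bs = 4K,  ps = 4K:  nr_steps = 1, paddrs[0] = page + offset_in_page(paddr)
 *	bs = 16K, ps = 4K:  nr_steps = 4, paddr is page aligned, so
 *			    paddrs[i] = physical address of the i-th page of
 *			    the block (offset_in_page(paddr) == 0)
 *
 * Either way, each paddrs[] entry covers exactly one "step" (min(bs, ps))
 * worth of data for the checksum loop in btrfs_calculate_block_csum_pages().
 */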

/*
 * Calculate the checksum of a fs block backed by multiple noncontiguous pages
 * at @paddrs[] and save the result to @dest.
 *
 * Each entry of @paddrs[] must cover one step (min(block size, page size)) of
 * the fs block, so together the entries cover the full block.
 */
void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
				      const phys_addr_t paddrs[], u8 *dest)
{
	const u32 blocksize = fs_info->sectorsize;
	const u32 step = min(blocksize, PAGE_SIZE);
	const u32 nr_steps = blocksize / step;
	struct btrfs_csum_ctx csum;

	btrfs_csum_init(&csum, fs_info->csum_type);
	for (int i = 0; i < nr_steps; i++) {
		const phys_addr_t paddr = paddrs[i];
		void *kaddr;

		ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
		kaddr = kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
		btrfs_csum_update(&csum, kaddr, step);
		kunmap_local(kaddr);
	}
	btrfs_csum_final(&csum, dest);
}

/*
 * Verify the checksum for a single fs block without any extra action that
 * depends on the type of I/O.
 *
 * The folio containing @paddr must be large enough to contain a full fs block.
 */
int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
			   const u8 * const csum_expected)
{
	btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
	if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
		return -EIO;
	return 0;
}

/*
 * Verify the checksum of a single data block, which can be scattered across
 * noncontiguous pages.
 *
 * @bbio:	btrfs_bio which contains the csum
 * @dev:	device the sector is on
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @paddrs:	physical addresses which back the fs block
 *
 * Check if the checksum on a data block is valid. When a checksum mismatch is
 * detected, report the error and fill the corrupted range with zero.
 *
 * Return %true if the sector is ok or had no checksum to start with, else %false.
 */
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, const phys_addr_t paddrs[])
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 blocksize = fs_info->sectorsize;
	const u32 step = min(blocksize, PAGE_SIZE);
	const u32 nr_steps = blocksize / step;
	u64 file_offset = bbio->file_offset + bio_offset;
	u64 end = file_offset + blocksize - 1;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	if (!bbio->csum)
		return true;

	if (btrfs_is_data_reloc_root(inode->root) &&
	    btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
				 NULL)) {
		/* Skip the range without csum for data reloc inode */
		btrfs_clear_extent_bit(&inode->io_tree, file_offset, end,
				       EXTENT_NODATASUM, NULL);
		return true;
	}

	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
			fs_info->csum_size;
	btrfs_calculate_block_csum_pages(fs_info, paddrs, csum);
	if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
		goto zeroit;
	return true;

zeroit:
	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
				    bbio->mirror_num);
	if (dev)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
	for (int i = 0; i < nr_steps; i++)
		memzero_page(phys_to_page(paddrs[i]), offset_in_page(paddrs[i]), step);
	return false;
}

/*
 * Perform a delayed iput on @inode.
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long flags;

	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
		return;

	WARN_ON_ONCE(test_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state));
	atomic_inc(&fs_info->nr_delayed_iputs);
	/*
	 * Need to be irq safe here because we can be called from either an irq
	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
	 * context.
	 */
	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
	ASSERT(list_empty(&inode->delayed_iput));
	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}
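
/*
 * A hedged illustration of the fast path above (the reference counts are
 * hypothetical): with i_count == 3, atomic_add_unless() simply drops it to 2
 * and we return, with no list manipulation or wakeup needed. Only when
 * i_count would go from 1 to 0 (the final reference) do we queue the inode on
 * fs_info->delayed_iputs and let the cleaner kthread run the real iput() in
 * process context, where eviction is safe.
 */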

static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock_irq(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock_irq(&fs_info->delayed_iput_lock);
}

static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock_irq(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock_irq(&fs_info->delayed_iput_lock);
	}
}

void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	/*
	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
	 * calls btrfs_add_delayed_iput() and that needs to lock
	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
	 * prevent a deadlock.
	 */
	spin_lock_irq(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
					 struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
		if (need_resched()) {
			spin_unlock_irq(&fs_info->delayed_iput_lock);
			cond_resched();
			spin_lock_irq(&fs_info->delayed_iput_lock);
		}
	}
	spin_unlock_irq(&fs_info->delayed_iput_lock);
}

/*
 * Wait for all delayed iputs to be flushed.
 *
 * @fs_info: the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set. Once they are all done running we will return, unless we are killed in
 * which case we return -EINTR. This helps in user operations like fallocate etc
 * that might get blocked on the iputs.
 *
 * Return -EINTR if we were killed, 0 if nothing's pending.
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
				      atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}
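
/*
 * A hedged caller sketch (the flow is illustrative, not a quote of any real
 * call site): a space flusher that wants back the metadata pinned by pending
 * delayed iputs before giving up with ENOSPC could do roughly:
 *
 *	btrfs_run_delayed_iputs(fs_info);
 *	ret = btrfs_wait_on_delayed_iputs(fs_info);
 *	if (ret == -EINTR)
 *		return ret;	// fatal signal, bail out
 *	// retry the reservation now that the iputs have completed
 */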

/*
 * This creates an orphan entry for the given inode in case something goes wrong
 * in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (unlikely(ret && ret != -EEXIST)) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}

/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}

/*
 * This cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		struct btrfs_inode *inode;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches.
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * This is where we are basically btrfs_lookup, without the
		 * crossing root thing. We store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			/*
			 * We found the same inode as before. This means we were
			 * not able to remove its items via eviction triggered
			 * by an iput(). A transaction abort may have happened,
			 * due to -ENOSPC for example, so try to grab the error
			 * that led to a transaction abort, if any.
			 */
			btrfs_err(fs_info,
				  "Error removing orphan entry, stopping orphan cleanup");
			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(last_objectid, root);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			inode = NULL;
			if (ret != -ENOENT)
				goto out;
		}

		if (!inode && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
						      (unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 * file. In that case, the orphan item represents incomplete
		 * fsverity metadata which must be cleaned up with
		 * btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 * orphan item for truncate indicating that there were possibly
		 * extent items past i_size that needed to be deleted. In v3.12,
		 * truncate was changed to update i_size in sync with the extent
		 * items, but the (useless) orphan item was still created. Since
		 * v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (!inode || inode->vfs_inode.i_nlink) {
			if (inode) {
				ret = btrfs_drop_verity_items(inode);
				iput(&inode->vfs_inode);
				inode = NULL;
				if (ret)
					goto out;
			}
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(&inode->vfs_inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	return ret;
}

/*
 * Look ahead in the leaf for xattrs. If we don't find any then we know there
 * can't be any ACLs.
 *
 * @leaf:	the extent buffer leaf to search
 * @slot:	the slot the inode is in
 * @objectid:	the objectid of the inode
 *
 * Return true if there is an xattr/ACL, false otherwise.
 */
static noinline bool acls_after_inode_item(struct extent_buffer *leaf,
					   int slot, u64 objectid,
					   int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					       strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
						strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* We found a different objectid, there must be no ACLs. */
		if (found_key.objectid != objectid)
			return false;

		/* We found an xattr, assume we've got an ACL. */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return true;
		}

		/*
		 * We found a key greater than an xattr key, there can't be any
		 * ACLs later on.
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return false;

		slot++;
		scanned++;

		/*
		 * The item order goes like:
		 * - inode
		 * - inode backrefs
		 * - xattrs
		 * - extents
		 *
		 * so if there are lots of hard links to an inode there can be
		 * a lot of backrefs. Don't waste time searching too hard,
		 * this is just an optimization.
		 */
		if (scanned >= 8)
			break;
	}
	/*
	 * We hit the end of the leaf before we found an xattr or something
	 * larger than an xattr. We have to assume the inode has ACLs.
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return true;
}
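
/*
 * A hedged example of the leaf layout the scan above walks (keys are made up
 * for illustration; (objectid, type, offset) form):
 *
 *	slot 0: (257, INODE_ITEM, 0)	 <- the inode we start from
 *	slot 1: (257, INODE_REF, 256)	 <- backref, keep scanning
 *	slot 2: (257, XATTR_ITEM, hash)	 <- first_xattr_slot = 2
 *	slot 3: (257, EXTENT_DATA, 0)	 <- type > XATTR_ITEM, stop
 *
 * If slot 2 hashed to one of the POSIX ACL names we would return true
 * immediately; a leaf full of INODE_REF items would instead trip the
 * scanned >= 8 cutoff and conservatively report that ACLs may exist.
 */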

static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (WARN_ON_ONCE(inode->file_extent_tree))
		return 0;
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;
	if (!S_ISREG(inode->vfs_inode.i_mode))
		return 0;
	if (btrfs_is_free_space_inode(inode))
		return 0;

	inode->file_extent_tree = kmalloc_obj(struct extent_io_tree);
	if (!inode->file_extent_tree)
		return -ENOMEM;

	btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree,
				  IO_TREE_INODE_FILE_EXTENT);
	/* Lockdep class is set only for the file extent tree. */
	lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);

	return 0;
}

static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *existing;
	const u64 ino = btrfs_ino(inode);
	int ret;

	if (inode_unhashed(&inode->vfs_inode))
		return 0;

	if (prealloc) {
		ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
		if (ret)
			return ret;
	}

	existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);

	if (xa_is_err(existing)) {
		ret = xa_err(existing);
		ASSERT(ret != -EINVAL);
		ASSERT(ret != -ENOMEM);
		return ret;
	} else if (existing) {
		WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING)));
	}

	return 0;
}
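
/*
 * A hedged note on the xarray pattern above: when @prealloc is set, the
 * xa_reserve() preallocates the slot with GFP_NOFS while it is still safe to
 * sleep, so the later xa_store() with GFP_ATOMIC cannot fail with -ENOMEM
 * (hence the ASSERT on that error). Roughly:
 *
 *	xa_reserve(&root->inodes, ino, GFP_NOFS);	  // may sleep, may fail
 *	...
 *	xa_store(&root->inodes, ino, inode, GFP_ATOMIC);  // slot already there
 */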

/*
 * Read a locked inode from the btree into the in-memory inode and add it to
 * its root list/tree.
 *
 * On failure clean up the inode.
 */
static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path *path)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct inode *vfs_inode = &inode->vfs_inode;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	ASSERT(path);

	btrfs_get_inode_key(inode, &location);

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		/*
		 * ret > 0 can come from btrfs_search_slot called by
		 * btrfs_lookup_inode(); this means the inode was not found.
		 */
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	vfs_inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(vfs_inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime),
			btrfs_timespec_nsec(leaf, &inode_item->atime));

	inode_set_mtime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
			btrfs_timespec_nsec(leaf, &inode_item->mtime));

	inode_set_ctime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
			btrfs_timespec_nsec(leaf, &inode_item->ctime));

	inode->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
	inode->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(vfs_inode, btrfs_inode_nbytes(leaf, inode_item));
	inode->generation = btrfs_inode_generation(leaf, inode_item);
	inode->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(vfs_inode, btrfs_inode_sequence(leaf, inode_item));
	vfs_inode->i_generation = inode->generation;
	vfs_inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	if (S_ISDIR(vfs_inode->i_mode))
		inode->index_cnt = (u64)-1;

	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
				&inode->flags, &inode->ro_flags);
	btrfs_update_inode_mapping_flags(inode);
	btrfs_set_inode_mapping_order(inode);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * cache.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in the delayed_nodes xarray.
	 */
	if (inode->last_trans == btrfs_get_fs_generation(fs_info))
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	inode->last_unlink_trans = inode->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	inode->last_reflink_trans = inode->last_trans;

	path->slots[0]++;
	if (vfs_inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(inode))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		inode->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		inode->dir_index = btrfs_inode_extref_index(leaf, extref);
	}
cache_acl:
	/*
	 * Try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls.
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(inode), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(inode), btrfs_root_id(root), ret);
	}

	/*
	 * We don't need the path anymore, so release it to avoid holding a read
	 * lock on a leaf while calling btrfs_init_file_extent_tree(), which can
	 * allocate memory that triggers reclaim (GFP_KERNEL) and cause a locking
	 * dependency.
	 */
	btrfs_release_path(path);

	ret = btrfs_init_file_extent_tree(inode);
	if (ret)
		goto out;
	btrfs_inode_set_file_extent_range(inode, 0,
			round_up(i_size_read(vfs_inode), fs_info->sectorsize));

	if (!maybe_acls)
		cache_no_acl(vfs_inode);

	switch (vfs_inode->i_mode & S_IFMT) {
	case S_IFREG:
		vfs_inode->i_mapping->a_ops = &btrfs_aops;
		vfs_inode->i_fop = &btrfs_file_operations;
		vfs_inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		vfs_inode->i_fop = &btrfs_dir_file_operations;
		vfs_inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		vfs_inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(vfs_inode);
		vfs_inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		vfs_inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(vfs_inode, vfs_inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);

	ret = btrfs_add_inode_to_root(inode, true);
	if (ret)
		goto out;

	return 0;
out:
	/*
	 * We may have a read locked leaf and iget_failed() triggers inode
	 * eviction which needs to release the delayed inode and that needs
	 * to lock the delayed inode's mutex. This can cause an ABBA deadlock
	 * with a task running delayed items, as that requires first locking
	 * the delayed inode's mutex and then modifying its subvolume btree.
	 * So release the path before iget_failed().
	 */
	btrfs_release_path(path);
	iget_failed(vfs_inode);
	return ret;
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	u64 flags;

	btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
	btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
	btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));

	btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
	btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));

	btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
	btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));

	btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
	btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_inode_flags(leaf, item, flags);
	btrfs_set_inode_block_group(leaf, item, 0);
}

/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
					    struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_get_inode_key(inode, &key);
	ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_set_inode_last_trans(trans, inode);
	return 0;
}

/*
 * copy everything in the in-memory inode into the btree.
 */
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay.
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && !btrfs_is_data_reloc_root(root)
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, inode);
}

int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, inode);
	return ret;
}
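
/*
 * A hedged note on the fallback above: btrfs_update_inode() may route the
 * update through the delayed inode machinery, which needs its own metadata
 * reservation and can therefore fail with -ENOSPC even though the inode item
 * itself could still be updated through the existing btree path. On -ENOSPC
 * the fallback retries with btrfs_update_inode_item(), which writes the item
 * directly. Callers such as the ordered extent completion path above use this
 * so a tight-on-space inode update does not fail unnecessarily.
 */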

static void update_time_after_link_or_unlink(struct btrfs_inode *dir)
{
	struct timespec64 now;

	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags))
		return;

	now = inode_set_ctime_current(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, now);
}

/*
 * Unlink helper that gets used here in inode.c and in the tree logging
 * recovery code. It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory.
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const struct fscrypt_str *name,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
	if (IS_ERR_OR_NULL(di)) {
		btrfs_free_path(path);
		return di ? PTR_ERR(di) : -ENOENT;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	/*
	 * Down the call chains below we'll also need to allocate a path, so no
	 * need to hold on to this one for longer than necessary.
	 */
	btrfs_free_path(path);
	if (ret)
		return ret;

	/*
	 * If we don't have a cached dir index, we have to get it by looking up
	 * the inode ref. Since that lookup gives us the inode ref anyway,
	 * remove it directly; a delayed deletion is unnecessary.
	 *
	 * But if we do have the dir index cached, there is no need to search
	 * the inode ref to get it. Since the inode ref is close to the inode
	 * item, it is better to delay its deletion and do it when we update
	 * the inode item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
	if (unlikely(ret)) {
		btrfs_crit(fs_info,
			   "failed to delete reference to %.*s, root %llu inode %llu parent %llu",
			   name->len, name->name, btrfs_root_id(root), ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, name, inode, dir);
		btrfs_del_dir_entries_in_log(trans, name, dir, index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context. If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks. Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_set_ctime_current(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	update_time_after_link_or_unlink(dir);

	return btrfs_update_inode(trans, dir);
}

int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct fscrypt_str *name)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, inode);
	}
	return ret;
}

/*
 * Helper to start a transaction for unlink and rmdir.
 *
 * Unlink and rmdir are special in btrfs: they do not always free space, so
 * if we cannot make our reservations the normal way, try to see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
{
	struct btrfs_root *root = dir->root;

	return btrfs_start_transaction_fallback_global_rsv(root,
						BTRFS_UNLINK_METADATA_UNITS);
}
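
/*
 * A hedged illustration of why the global reserve fallback above exists:
 * deleting a name can *grow* metadata before it shrinks anything, e.g.
 * removing a dir item may CoW a full btree path, so on a filesystem at
 * ENOSPC the normal reservation can fail even though completing the unlink
 * would eventually release space. Borrowing BTRFS_UNLINK_METADATA_UNITS
 * worth of slack from the global block reserve lets the deletion make
 * forward progress in that case.
 */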

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fscrypt_free;
	}

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				false);

	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (ret)
		goto end_trans;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto end_trans;
	}

end_trans:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
fscrypt_free:
	fscrypt_free_filename(&fname);
	return ret;
}

static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(dir);
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = btrfs_root_id(inode->root);
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->ref_root_id;
	} else {
		WARN_ON(1);
		fscrypt_free_filename(&fname);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   &fname.disk_name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation. In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it. Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 btrfs_root_id(root), dir_ino,
					 &index, &fname.disk_name);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
	inode_inc_iversion(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
	ret = btrfs_update_inode_fallback(trans, dir);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	fscrypt_free_filename(&fname);
	return ret;
}

/*
 * Helper to check if the subvolume references other subvolumes or if it's
 * the default subvolume.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct fscrypt_str name = FSTR_INIT("default", 7);
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, &name, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == btrfs_root_id(root)) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			return ret;
		}
		btrfs_release_path(path);
	}

	key.objectid = btrfs_root_id(root);
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (unlikely(ret == 0)) {
		/*
		 * Key with offset -1 found, there would have to exist a root
		 * with such id, but this is out of valid range.
		 */
		return -EUCLEAN;
	}

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}

	return ret;
}
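
/*
 * A hedged sketch of the search-back idiom used above (keys are made up):
 * searching for (root_id, ROOT_REF, -1ULL) can never find an exact match, so
 * btrfs_search_slot() returns > 0 positioned just past where that key would
 * sit. Stepping one slot back lands on the highest existing key that sorts
 * before it:
 *
 *	slot N-1: (root_id, ROOT_REF, <some dirid>) -> subvol has children,
 *						       return -ENOTEMPTY
 *	slot N-1: (other_id, ...)		    -> no refs, ret stays 0
 */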

/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_inode *inode;
	u64 min_ino = 0;

	if (!BTRFS_FS_ERROR(fs_info))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	inode = btrfs_find_first_inode(root, min_ino);
	while (inode) {
		if (icount_read(&inode->vfs_inode) > 1)
			d_prune_aliases(&inode->vfs_inode);

		min_ino = btrfs_ino(inode) + 1;
		/*
		 * btrfs_drop_inode() will have it removed from the inode
		 * cache when its usage count hits zero.
		 */
		iput(&inode->vfs_inode);
		cond_resched();
		inode = btrfs_find_first_inode(root, min_ino);
	}
}

int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	u64 qgroup_reserved = 0;
	int ret;

	down_write(&fs_info->subvol_sem);

	/*
	 * Don't allow deleting a subvolume with send in progress. This is
	 * inside the inode lock so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   btrfs_root_id(dest));
		ret = -EPERM;
		goto out_up_write;
	}
	if (atomic_read(&dest->nr_swapfiles)) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu with active swapfile",
			   btrfs_root_id(dest));
		ret = -EPERM;
		goto out_up_write;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_undead;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_undead;
	qgroup_reserved = block_rsv.qgroup_rsv_reserved;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
	qgroup_reserved = 0;
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, dir);

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					       fs_info->tree_root,
					       btrfs_root_id(dest));
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				     BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
	if (unlikely(ret && ret != -ENOENT)) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					     dest->root_item.received_uuid,
					     BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					     btrfs_root_id(dest));
		if (unlikely(ret && ret != -ENOENT)) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
	if (qgroup_reserved)
		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
out_undead:
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	}
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (!ret) {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}
4891
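/*
 * Remove a directory entry. Plain empty directories are handled here; if the
 * victim is the root of a subvolume or snapshot, deletion is delegated to
 * btrfs_delete_subvolume().
 */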
static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry)
{
	struct btrfs_inode *dir = BTRFS_I(vfs_dir);
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	struct btrfs_trans_handle *trans;
	struct fscrypt_name fname;

	if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
			btrfs_err(fs_info,
				  "extent tree v2 doesn't support snapshot deletion yet");
			return -EOPNOTSUPP;
		}
		return btrfs_delete_subvolume(dir, dentry);
	}

	ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	/*
	 * Propagate the last_unlink_trans value of the deleted dir to its
	 * parent directory. This is to prevent an unrecoverable log tree in the
	 * case we do something like this:
	 * 1) create dir foo
	 * 2) create snapshot under dir foo
	 * 3) delete the snapshot
	 * 4) rmdir foo
	 * 5) mkdir foo
	 * 6) fsync foo or some file inside foo
	 *
	 * This is because we can't unlink other roots when replaying the dir
	 * deletes for directory foo.
	 */
	if (inode->last_unlink_trans >= trans->transid)
		btrfs_record_snapshot_destroy(trans, dir);

	if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, dir, dentry);
		goto out;
	}

	ret = btrfs_orphan_add(trans, inode);
	if (ret)
		goto out;

	/* now the directory is empty */
	ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name);
	if (!ret)
		btrfs_i_size_write(inode, 0);
out:
	btrfs_end_transaction(trans);
out_notrans:
	btrfs_btree_balance_dirty(fs_info);
	fscrypt_free_filename(&fname);

	return ret;
}

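/* Return true if @bytenr falls inside the block starting at @blockstart. */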
static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize)
{
	ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u",
	       blockstart, blocksize);

	if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1)
		return true;
	return false;
}

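/*
 * Zero the range [@start, end of folio) inside the folio that covers @start.
 * No extent locking nor ordered extent waiting is needed, as the range is
 * already beyond EOF (only relevant when block size < page size).
 */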
static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
{
	const pgoff_t index = (start >> PAGE_SHIFT);
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct folio *folio;
	u64 zero_start;
	u64 zero_end;
	int ret = 0;

again:
	folio = filemap_lock_folio(mapping, index);
	/* No folio present. */
	if (IS_ERR(folio))
		return 0;

	if (!folio_test_uptodate(folio)) {
		ret = btrfs_read_folio(NULL, folio);
		folio_lock(folio);
		if (folio->mapping != mapping) {
			folio_unlock(folio);
			folio_put(folio);
			goto again;
		}
		if (unlikely(!folio_test_uptodate(folio))) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	folio_wait_writeback(folio);

	/*
	 * We do not need to lock extents nor wait for OE, as it's already
	 * beyond EOF.
	 */

	zero_start = max_t(u64, folio_pos(folio), start);
	zero_end = folio_next_pos(folio);
	folio_zero_range(folio, zero_start - folio_pos(folio),
			 zero_end - zero_start);

out_unlock:
	folio_unlock(folio);
	folio_put(folio);
	return ret;
}

/*
 * Handle the truncation of a fs block.
 *
 * @inode  - inode that we're zeroing
 * @offset - the file offset of the block to truncate
 *           The value must be inside [@start, @end], and the function will do
 *           extra checks if the block that covers @offset needs to be zeroed.
 * @start  - the start file offset of the range we want to zero
 * @end    - the end (inclusive) file offset of the range we want to zero.
 *
 * If the range is not block aligned, read out the folio that covers @offset,
 * and if needed zero blocks that are inside the folio and covered by [@start, @end].
 * If @start or @end + 1 lands inside a block, that block will be marked dirty
 * for writeback.
 *
 * This is utilized by hole punch, zero range and file expansion.
 */
int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	bool only_release_metadata = false;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = (offset >> PAGE_SHIFT);
	struct folio *folio;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	int ret = 0;
	const bool in_head_block = is_inside_block(offset, round_down(start, blocksize),
						   blocksize);
	const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize),
						   blocksize);
	bool need_truncate_head = false;
	bool need_truncate_tail = false;
	u64 zero_start;
	u64 zero_end;
	u64 block_start;
	u64 block_end;

	/* @offset should be inside the range. */
	ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu",
	       offset, start, end);

	/* The range is aligned at both ends. */
	if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) {
		/*
		 * For block size < page size case, we may have polluted blocks
		 * beyond EOF. So we also need to zero them out.
		 */
		if (end == (u64)-1 && blocksize < PAGE_SIZE)
			ret = truncate_block_zero_beyond_eof(inode, start);
		goto out;
	}

	/*
	 * @offset may not be inside the head nor tail block. In that case we
	 * don't need to do anything.
	 */
	if (!in_head_block && !in_tail_block)
		goto out;

	/*
	 * Skip the truncation if the range in the target block is already aligned.
	 * The seemingly complex check will also handle the same block case.
	 */
	if (in_head_block && !IS_ALIGNED(start, blocksize))
		need_truncate_head = true;
	if (in_tail_block && !IS_ALIGNED(end + 1, blocksize))
		need_truncate_tail = true;
	if (!need_truncate_head && !need_truncate_tail)
		goto out;

	block_start = round_down(offset, blocksize);
	block_end = block_start + blocksize - 1;

	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
					  blocksize, false);
	if (ret < 0) {
		size_t write_bytes = blocksize;

		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
			/* For nocow case, no need to reserve data space. */
			ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u",
			       write_bytes, blocksize);
			only_release_metadata = true;
		} else {
			goto out;
		}
	}
	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
	if (ret < 0) {
		if (!only_release_metadata)
			btrfs_free_reserved_data_space(inode, data_reserved,
						       block_start, blocksize);
		goto out;
	}
again:
	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
	if (IS_ERR(folio)) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, blocksize, true);
		else
			btrfs_delalloc_release_space(inode, data_reserved,
						     block_start, blocksize, true);
		btrfs_delalloc_release_extents(inode, blocksize);
		ret = PTR_ERR(folio);
		goto out;
	}

	if (!folio_test_uptodate(folio)) {
		ret = btrfs_read_folio(NULL, folio);
		folio_lock(folio);
		if (folio->mapping != mapping) {
			folio_unlock(folio);
			folio_put(folio);
			goto again;
		}
		if (unlikely(!folio_test_uptodate(folio))) {
			ret = -EIO;
			goto out_unlock;
		}
	}

	/*
	 * We unlock the page after the io is completed and then re-lock it
	 * above. release_folio() could have come in between that and cleared
	 * folio private, but left the page in the mapping. Set the page mapped
	 * here to make sure it's properly set for the subpage stuff.
	 */
	ret = set_folio_extent_mapped(folio);
	if (ret < 0)
		goto out_unlock;

	folio_wait_writeback(folio);

	btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
		folio_unlock(folio);
		folio_put(folio);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end,
			       EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			       &cached_state);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
					&cached_state);
	if (ret) {
		btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
		goto out_unlock;
	}

	if (end == (u64)-1) {
		/*
		 * We're truncating beyond EOF, the remaining blocks normally are
		 * already holes thus no need to zero again, but it's possible for
		 * fs block size < page size cases to have memory mapped writes
		 * to pollute ranges beyond EOF.
		 *
		 * In that case although such polluted blocks beyond EOF will
		 * not reach disk, they still affect our page caches.
		 */
		zero_start = max_t(u64, folio_pos(folio), start);
		zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
	} else {
		zero_start = max_t(u64, block_start, start);
		zero_end = min_t(u64, block_end, end);
	}
	folio_zero_range(folio, zero_start - folio_pos(folio),
			 zero_end - zero_start + 1);

	btrfs_folio_clear_checked(fs_info, folio, block_start,
				  block_end + 1 - block_start);
	btrfs_folio_set_dirty(fs_info, folio, block_start,
			      block_end + 1 - block_start);

	if (only_release_metadata)
		btrfs_set_extent_bit(&inode->io_tree, block_start, block_end,
				     EXTENT_NORESERVE, &cached_state);

	btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);

out_unlock:
	if (ret) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, blocksize, true);
		else
			btrfs_delalloc_release_space(inode, data_reserved,
						     block_start, blocksize, true);
	}
	btrfs_delalloc_release_extents(inode, blocksize);
	folio_unlock(folio);
	folio_put(folio);
out:
	if (only_release_metadata)
		btrfs_check_nocow_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}

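/*
 * Drop any existing extents in [@offset, @offset + @len) and, unless the
 * NO_HOLES feature is enabled, insert an explicit hole extent for that range.
 */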
static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	/*
	 * If NO_HOLES is enabled, we don't need to do anything.
	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
	 * or btrfs_update_inode() will be called, which guarantee that the next
	 * fsync will know this inode was changed and needs to be logged.
	 */
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	drop_args.start = offset;
	drop_args.end = offset + len;
	drop_args.drop_cache = true;

	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
	} else {
		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
		btrfs_update_inode(trans, inode);
	}
	btrfs_end_transaction(trans);
	return ret;
}

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for. So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
 * for the range between oldsize and size.
 */
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int ret = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	ret = btrfs_truncate_block(inode, oldsize, oldsize, -1);
	if (ret)
		return ret;

	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
					   &cached_state);
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(btrfs_extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		hole_size = last_byte - cur_offset;

		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
			struct extent_map *hole_em;

			ret = maybe_insert_hole(inode, cur_offset, hole_size);
			if (ret)
				break;

			ret = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (ret)
				break;

			hole_em = btrfs_alloc_extent_map();
			if (!hole_em) {
				btrfs_drop_extent_map_range(inode, cur_offset,
						cur_offset + hole_size - 1,
						false);
				btrfs_set_inode_full_sync(inode);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;

			hole_em->disk_bytenr = EXTENT_MAP_HOLE;
			hole_em->disk_num_bytes = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->generation = btrfs_get_fs_generation(fs_info);

			ret = btrfs_replace_extent_map_range(inode, hole_em, true);
			btrfs_free_extent_map(hole_em);
		} else {
			ret = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (ret)
				break;
		}
next:
		btrfs_free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	btrfs_free_extent_map(em);
	btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
	return ret;
}

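/*
 * Apply a new i_size from setattr. Expanding truncates zero the tail block
 * and insert hole extents via btrfs_cont_expand(); shrinking truncates go
 * through btrfs_truncate() after the page cache has been trimmed.
 */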
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set. For all other operations the VFS sets these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
			inode_set_mtime_to_ts(inode,
					      inode_set_ctime_current(inode));
		}
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_drew_write_lock(&root->snapshot_lock);
		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
		if (ret) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, BTRFS_I(inode));
		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_end_transaction(trans);
	} else {
		struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);

		if (btrfs_is_zoned(fs_info)) {
			ret = btrfs_wait_ordered_range(BTRFS_I(inode),
					ALIGN(newsize, fs_info->sectorsize),
					(u64)-1);
			if (ret)
				return ret;
		}

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure any new writes to the file get on disk
		 * on close.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		truncate_setsize(inode, newsize);

		inode_dio_wait(inode);

		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
		if (ret && inode->i_nlink) {
			int ret2;

			/*
			 * Truncate failed, so fix up the in-memory size. We
			 * adjusted disk_i_size down as we removed extents, so
			 * wait for disk_i_size to be stable and then update the
			 * in-memory size to match.
			 */
			ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
			if (ret2)
				return ret2;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}

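/* The ->setattr callback: applies size changes and other attribute updates. */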
static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (btrfs_root_readonly(root))
		return -EROFS;

	ret = setattr_prepare(idmap, dentry, attr);
	if (ret)
		return ret;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		ret = btrfs_setsize(inode, attr);
		if (ret)
			return ret;
	}

	if (attr->ia_valid) {
		setattr_copy(idmap, inode, attr);
		inode_inc_iversion(inode);
		ret = btrfs_dirty_inode(BTRFS_I(inode));

		if (!ret && attr->ia_valid & ATTR_MODE)
			ret = posix_acl_chmod(idmap, dentry, inode->i_mode);
	}

	return ret;
}

/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge number of calls to lock_extent() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct rb_node *node;

	ASSERT(inode_state_read_once(inode) & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlock the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to a
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;
		unsigned state_flags;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		state_flags = state->state;
		spin_unlock(&io_tree->lock);

		btrfs_lock_extent(io_tree, start, end, &cached_state);

		/*
		 * If the range still has the DELALLOC flag, the extent didn't
		 * reach disk, and its reserved space won't be freed by
		 * delayed_ref. So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidate_folio, case 2)
		 *
		 * Note, end is the bytenr of the last byte, so we need + 1 here.
		 */
		if (state_flags & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
					       end - start + 1, NULL);

		btrfs_clear_extent_bit(io_tree, start, end,
				       EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
				       &cached_state);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}

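/*
 * Refill the temporary block reserve used during eviction and join a
 * transaction, retrying with a smaller reservation (without the extra
 * delayed refs room) if the first attempt fails.
 */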
static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
							struct btrfs_block_rsv *rsv)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	int ret;

	/*
	 * Eviction should be taking place at some place safe because of our
	 * delayed iputs. However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above. We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
	 * if we fail to make this reservation we can re-try without the
	 * delayed_refs_extra so we can make some forward progress.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
				     BTRFS_RESERVE_FLUSH_EVICT);
	if (ret) {
		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
					     BTRFS_RESERVE_FLUSH_EVICT);
		if (ret) {
			btrfs_warn(fs_info,
				   "could not allocate space for delete; will truncate on mount");
			return ERR_PTR(-ENOSPC);
		}
		delayed_refs_extra = 0;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return trans;

	if (delayed_refs_extra) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		trans->bytes_reserved = delayed_refs_extra;
		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
					delayed_refs_extra, true);
	}
	return trans;
}

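/*
 * Evict an inode: for unlinked inodes this truncates away all of its items
 * and removes the orphan item; otherwise it only releases in-memory state.
 */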
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv rsv;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root)
		goto clear_inode;

	fs_info = inode_to_fs_info(inode);
	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto out;

	if (is_bad_inode(inode))
		goto out;

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		goto out;

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
		goto out;
	}

	/*
	 * This makes sure the inode item in tree is uptodate and the space for
	 * the inode update is released.
	 */
	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret)
		goto out;

	/*
	 * This drops any pending insert or delete operations we have for this
	 * inode. We could have a delayed dir index deletion queued up, but
	 * we're removing the inode completely so that'll be taken care of in
	 * the truncate.
	 */
	btrfs_kill_delayed_inode_items(BTRFS_I(inode));

	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
	rsv.size = btrfs_calc_metadata_size(fs_info, 1);
	rsv.failfast = true;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	while (1) {
		struct btrfs_truncate_control control = {
			.inode = BTRFS_I(inode),
			.ino = btrfs_ino(BTRFS_I(inode)),
			.new_size = 0,
			.min_type = 0,
		};

		trans = evict_refill_and_join(root, &rsv);
		if (IS_ERR(trans))
			goto out_release;

		trans->block_rsv = &rsv;

		ret = btrfs_truncate_inode_items(trans, root, &control);
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		/*
		 * We have not added new delayed items for our inode after we
		 * have flushed its delayed items, so no need to throttle on
		 * delayed items. However we have modified extent buffers.
		 */
		btrfs_btree_balance_dirty_nodelay(fs_info);
		if (ret && ret != -ENOSPC && ret != -EAGAIN)
			goto out_release;
		else if (!ret)
			break;
	}

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items in
	 * the tree. They will be cleaned up on the next mount. If the inode
	 * number gets reused, cleanup deletes the orphan item without doing
	 * anything, and unlink reuses the existing orphan item.
	 *
	 * If it turns out that we are dropping too many of these, we might want
	 * to add a mechanism for retrying these after a commit.
	 */
	trans = evict_refill_and_join(root, &rsv);
	if (!IS_ERR(trans)) {
		trans->block_rsv = &rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
	}

out_release:
	btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
out:
	/*
	 * If we didn't successfully delete, the orphan item will still be in
	 * the tree and we'll retry on the next mount. Again, we might also want
	 * to retry these periodically in the future.
	 */
	btrfs_remove_delayed_node(BTRFS_I(inode));
clear_inode:
	clear_inode(inode);
}

/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If a corrupted location is found in the dir entry, returns -EUCLEAN.
 */
static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
			       struct btrfs_key *location, u8 *type)
{
	struct btrfs_dir_item *di;
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_root *root = dir->root;
	int ret = 0;
	struct fscrypt_name fname;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret < 0)
		return ret;
	/*
	 * fscrypt_setup_filename() should never return a positive value, but
	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
	 */
	ASSERT(ret == 0);

	/* This needs to handle no-key deletions later on */

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
				   &fname.disk_name, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	if (unlikely(location->type != BTRFS_INODE_ITEM_KEY &&
		     location->type != BTRFS_ROOT_ITEM_KEY)) {
		ret = -EUCLEAN;
		btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location " BTRFS_KEY_FMT ")",
			   __func__, fname.disk_name.name, btrfs_ino(dir),
			   BTRFS_KEY_FMT_VALUE(location));
	}
	if (!ret)
		*type = btrfs_dir_ftype(path->nodes[0], di);
out:
	fscrypt_free_filename(&fname);
	return ret;
}

/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root. This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	BTRFS_PATH_AUTO_FREE(path);
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
	if (ret)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = btrfs_root_id(dir->root);
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
				   (unsigned long)(ref + 1), fname.disk_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	fscrypt_free_filename(&fname);
	return err;
}

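/*
 * Remove this btrfs_inode from its root's xarray of inodes. If this was the
 * last inode of a root that has been deleted, queue the root for cleanup.
 */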
static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *entry;
	bool empty = false;

	xa_lock(&root->inodes);
	/*
	 * This btrfs_inode is being freed and has already been unhashed at this
	 * point. It's possible that another btrfs_inode has already been
	 * allocated for the same inode and inserted itself into the root, so
	 * don't delete it in that case.
	 *
	 * Note that this shouldn't need to allocate memory, so the gfp flags
	 * don't really matter.
	 */
	entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
			     GFP_ATOMIC);
	if (entry == inode)
		empty = xa_empty(&root->inodes);
	xa_unlock(&root->inodes);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		xa_lock(&root->inodes);
		empty = xa_empty(&root->inodes);
		xa_unlock(&root->inodes);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

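/*
 * Callbacks for iget5: initialize a newly allocated in-memory inode, and
 * match an existing one by inode number and root.
 */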
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;

	btrfs_set_inode_number(BTRFS_I(inode), args->ino);
	BTRFS_I(inode)->root = btrfs_grab_root(args->root);

	if (args->root && args->root == args->root->fs_info->tree_root &&
	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
			&BTRFS_I(inode)->runtime_flags);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	return args->ino == btrfs_ino(BTRFS_I(inode)) &&
		args->root == BTRFS_I(inode)->root;
}

static struct btrfs_inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(ino, root);

	args.ino = ino;
	args.root = root;

	inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
				 btrfs_init_locked_inode,
				 (void *)&args);
	if (!inode)
		return NULL;
	return BTRFS_I(inode);
}

/*
 * Get an inode object given its inode number and corresponding root. Path is
 * preallocated to prevent recursing back to iget through allocator.
 */
struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct btrfs_inode *inode;
	int ret;

	inode = btrfs_iget_locked(ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
		return inode;

	ret = btrfs_read_locked_inode(inode, path);
	if (ret)
		return ERR_PTR(ret);

	unlock_new_inode(&inode->vfs_inode);
	return inode;
}

/*
 * Get an inode object given its inode number and corresponding root.
 */
struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
{
	struct btrfs_inode *inode;
	struct btrfs_path *path;
	int ret;

	inode = btrfs_iget_locked(ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
		return inode;

	path = btrfs_alloc_path();
	if (!path) {
		iget_failed(&inode->vfs_inode);
		return ERR_PTR(-ENOMEM);
	}

	ret = btrfs_read_locked_inode(inode, path);
	btrfs_free_path(path);
	if (ret)
		return ERR_PTR(ret);

	if (S_ISDIR(inode->vfs_inode.i_mode))
		inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC;
	unlock_new_inode(&inode->vfs_inode);
	return inode;
}

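/*
 * Build a dummy in-memory directory inode to stand in for a subvolume root
 * that has no backing root ref (see btrfs_lookup_dentry()).
 */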
static struct btrfs_inode *new_simple_dir(struct inode *dir,
					  struct btrfs_key *key,
					  struct btrfs_root *root)
{
	struct timespec64 ts;
	struct inode *vfs_inode;
	struct btrfs_inode *inode;

	vfs_inode = new_inode(dir->i_sb);
	if (!vfs_inode)
		return ERR_PTR(-ENOMEM);

	inode = BTRFS_I(vfs_inode);
	inode->root = btrfs_grab_root(root);
	inode->ref_root_id = key->objectid;
	set_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags);
	set_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags);

	btrfs_set_inode_number(inode, BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
	/*
	 * We only need lookup, the rest is read-only and there's no inode
	 * associated with the dentry.
	 */
	vfs_inode->i_op = &simple_dir_inode_operations;
	vfs_inode->i_opflags &= ~IOP_XATTR;
	vfs_inode->i_fop = &simple_dir_operations;
	vfs_inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;

	ts = inode_set_ctime_current(vfs_inode);
	inode_set_mtime_to_ts(vfs_inode, ts);
	inode_set_atime_to_ts(vfs_inode, inode_get_atime(dir));
	inode->i_otime_sec = ts.tv_sec;
	inode->i_otime_nsec = ts.tv_nsec;

	vfs_inode->i_uid = dir->i_uid;
	vfs_inode->i_gid = dir->i_gid;

	return inode;
}

static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
static_assert(BTRFS_FT_DIR == FT_DIR);
static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);

static inline u8 btrfs_inode_type(const struct btrfs_inode *inode)
{
	return fs_umode_to_ftype(inode->vfs_inode.i_mode);
}

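/*
 * Resolve a directory entry to an inode, crossing into the target subvolume's
 * tree when the entry points at another root.
 */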
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct btrfs_inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location = { 0 };
	u8 di_type = 0;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(location.objectid, root);
		if (IS_ERR(inode))
			return ERR_CAST(inode);

		/* Do extra check against inode mode with di_type */
		if (unlikely(btrfs_inode_type(inode) != di_type)) {
			btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
				   inode->vfs_inode.i_mode, btrfs_inode_type(inode),
				   di_type);
			iput(&inode->vfs_inode);
			return ERR_PTR(-EUCLEAN);
		}
		return &inode->vfs_inode;
	}

	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir, &location, root);
	} else {
		inode = btrfs_iget(location.objectid, sub_root);
		btrfs_put_root(sub_root);

		if (IS_ERR(inode))
			return ERR_CAST(inode);

		down_read(&fs_info->cleanup_work_sem);
		if (!sb_rdonly(inode->vfs_inode.i_sb))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(&inode->vfs_inode);
			inode = ERR_PTR(ret);
		}
	}

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return &inode->vfs_inode;
}

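/*
 * Decide whether a dentry should be dropped from the dcache: true for entries
 * belonging to deleted roots and for the stub empty-subvolume directory.
 */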
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);

	if (inode == ERR_PTR(-ENOENT))
		inode = NULL;
	return d_splice_alias(inode, dentry);
}

/*
 * Find the highest existing sequence number in a directory and then set the
 * in-memory index_cnt variable to the first free sequence number.
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (unlikely(ret == 0)) {
		/*
		 * Key with offset -1 found, there would have to exist a dir
		 * index item with such offset, but this is out of the valid
		 * range.
		 */
		btrfs_err(root->fs_info,
			  "unexpected exact match for DIR_INDEX key, inode %llu",
			  btrfs_ino(inode));
		return -EUCLEAN;
	}

	if (path->slots[0] == 0) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		return 0;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		return 0;
	}

	inode->index_cnt = found_key.offset + 1;

	return 0;
}

static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	btrfs_inode_lock(dir, 0);
	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				goto out;
		}
	}

	/* index_cnt is the index number of next new entry, so decrement it. */
	*index = dir->index_cnt - 1;
out:
	btrfs_inode_unlock(dir, 0);

	return ret;
}

/*
 * All this infrastructure exists because dir_emit can fault, and we are holding
 * the tree lock when doing readdir. For now just allocate a buffer and copy
 * our information into that, and then dir_emit from the buffer. This is
 * similar to what NFS does, only we don't keep the buffer around in pagecache
 * because I'm afraid I'll mess that up. Long term we need to make filldir do
 * copy_to_user_inatomic so we don't have to worry about page faulting under the
 * tree lock.
 */
static int btrfs_opendir(struct inode *inode, struct file *file)
{
	struct btrfs_file_private *private;
	u64 last_index;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
	if (ret)
		return ret;

	private = kzalloc_obj(struct btrfs_file_private);
	if (!private)
		return -ENOMEM;
	private->last_index = last_index;
	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!private->filldir_buf) {
		kfree(private);
		return -ENOMEM;
	}
	file->private_data = private;
	return 0;
}

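/* Seeking a directory also refreshes the last index visible to readdir. */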
static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct btrfs_file_private *private = file->private_data;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
				       &private->last_index);
	if (ret)
		return ret;

	return generic_file_llseek(file, offset, whence);
}

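/*
 * A readdir entry as staged in the filldir buffer: the fixed-size header is
 * immediately followed by name_len bytes of the entry name.
 */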
struct dir_entry {
	u64 ino;
	u64 offset;
	unsigned type;
	int name_len;
};

static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
	while (entries--) {
		struct dir_entry *entry = addr;
		char *name = (char *)(entry + 1);

		ctx->pos = get_unaligned(&entry->offset);
		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
			      get_unaligned(&entry->ino),
			      get_unaligned(&entry->type)))
			return 1;
		addr += sizeof(struct dir_entry) +
			get_unaligned(&entry->name_len);
		ctx->pos++;
	}
	return 0;
}

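/*
 * Read directory entries: walks the DIR_INDEX items (plus any delayed items
 * not yet committed), staging them in the filldir buffer before emitting.
 */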
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	BTRFS_PATH_AUTO_FREE(path);
	void *addr;
	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);
	int ret;
	char *name_ptr;
	int name_len;
	int entries = 0;
	int total_len = 0;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	addr = private->filldir_buf;
	path->reada = READA_FORWARD;

	put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
					      &ins_list, &del_list);

again:
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
		struct dir_entry *entry;
		struct extent_buffer *leaf = path->nodes[0];
		u8 ftype;

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			continue;
		if (found_key.offset > private->last_index)
			break;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			continue;
		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
		name_len = btrfs_dir_name_len(leaf, di);
		if ((total_len + sizeof(struct dir_entry) + name_len) >=
		    PAGE_SIZE) {
			btrfs_release_path(path);
			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
			if (ret)
				goto nopos;
			addr = private->filldir_buf;
			entries = 0;
			total_len = 0;
			goto again;
		}

		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
		entry = addr;
		name_ptr = (char *)(entry + 1);
		read_extent_buffer(leaf, name_ptr,
				   (unsigned long)(di + 1), name_len);
		put_unaligned(name_len, &entry->name_len);
		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
		btrfs_dir_item_key_to_cpu(leaf, di, &location);
		put_unaligned(location.objectid, &entry->ino);
		put_unaligned(found_key.offset, &entry->offset);
		entries++;
		addr += sizeof(struct dir_entry) + name_len;
		total_len += sizeof(struct dir_entry) + name_len;
	}
	/* Catch error encountered during iteration */
	if (ret < 0)
		goto err;

	btrfs_release_path(path);

	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
	if (ret)
		goto nopos;

	if (btrfs_readdir_delayed_dir_index(ctx, &ins_list))
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset. This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir. Until we reuse freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes. But it is most likely to find the inode in cache.
 * FIXME: needs more benchmarking... there are no reasons other than
 * performance to keep or drop this code.
 */
static int btrfs_dirty_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, inode);
	if (ret == -ENOSPC || ret == -EDQUOT) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, inode);
	}
	btrfs_end_transaction(trans);
	if (inode->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}

/*
 * We need our own ->update_time so that we can return error on ENOSPC for
 * updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, enum fs_update_time type,
			     unsigned int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int dirty;

	if (btrfs_root_readonly(root))
		return -EROFS;
	if (flags & IOCB_NOWAIT)
		return -EAGAIN;

	dirty = inode_update_time(inode, type, flags);
	if (dirty <= 0)
		return dirty;
	return btrfs_dirty_inode(BTRFS_I(inode));
}

/*
 * Helper to find a free sequence number in a given directory. The current
 * code is very simple; later versions will do smarter things in the btree.
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}

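/* Insert the new inode into the inode hash, keyed by inode number and root. */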
static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;

	args.ino = btrfs_ino(BTRFS_I(inode));
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
		   btrfs_find_actor, &args);
}

int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	int ret;

	if (!args->orphan) {
		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
					     &args->fname);
		if (ret)
			return ret;
	}

	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
	if (ret) {
		fscrypt_free_filename(&args->fname);
		return ret;
	}

	/* 1 to add inode item */
	*trans_num_items = 1;
	/* 1 to add compression property */
	if (BTRFS_I(dir)->prop_compress)
		(*trans_num_items)++;
	/* 1 to add default ACL xattr */
	if (args->default_acl)
		(*trans_num_items)++;
	/* 1 to add access ACL xattr */
	if (args->acl)
		(*trans_num_items)++;
#ifdef CONFIG_SECURITY
	/* 1 to add LSM xattr */
	if (dir->i_security)
		(*trans_num_items)++;
#endif
	if (args->orphan) {
		/* 1 to add orphan item */
		(*trans_num_items)++;
	} else {
		/*
		 * 1 to add dir item
		 * 1 to add dir index
		 * 1 to update parent inode item
		 *
		 * No need for 1 unit for the inode ref item because it is
		 * inserted in a batch together with the inode item at
		 * btrfs_create_new_inode().
		 */
		*trans_num_items += 3;
	}
	return 0;
}

void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
{
	posix_acl_release(args->acl);
	posix_acl_release(args->default_acl);
	fscrypt_free_filename(&args->fname);
}

/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
{
	unsigned int flags;

	flags = dir->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		inode->flags &= ~BTRFS_INODE_COMPRESS;
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
		inode->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		inode->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->vfs_inode.i_mode))
			inode->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);
}
6594
btrfs_create_new_inode(struct btrfs_trans_handle * trans,struct btrfs_new_inode_args * args)6595 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6596 struct btrfs_new_inode_args *args)
6597 {
6598 struct timespec64 ts;
6599 struct inode *dir = args->dir;
6600 struct inode *inode = args->inode;
6601 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6602 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6603 struct btrfs_root *root;
6604 struct btrfs_inode_item *inode_item;
6605 struct btrfs_path *path;
6606 u64 objectid;
6607 struct btrfs_inode_ref *ref;
6608 struct btrfs_key key[2];
6609 u32 sizes[2];
6610 struct btrfs_item_batch batch;
6611 unsigned long ptr;
6612 int ret;
6613 bool xa_reserved = false;
6614
6615 if (!args->orphan && !args->subvol) {
6616 /*
6617 * Before anything else, check if we can add the name to the
6618 * parent directory. We want to avoid a dir item overflow in
6619 * case we have an existing dir item due to existing name
6620 * hash collisions. We do this check here before we call
6621 * btrfs_add_link() down below so that we can avoid a
6622 * transaction abort (which could be exploited by malicious
6623 * users).
6624 *
6625 * For subvolumes we already do this in btrfs_mksubvol().
6626 */
6627 ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
6628 btrfs_ino(BTRFS_I(dir)),
6629 name);
6630 if (ret < 0)
6631 return ret;
6632 }
6633
6634 path = btrfs_alloc_path();
6635 if (!path)
6636 return -ENOMEM;
6637
6638 if (!args->subvol)
6639 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6640 root = BTRFS_I(inode)->root;
6641
6642 ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
6643 if (ret)
6644 goto out;
6645
6646 ret = btrfs_get_free_objectid(root, &objectid);
6647 if (ret)
6648 goto out;
6649 btrfs_set_inode_number(BTRFS_I(inode), objectid);
6650
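	/*
	 * Reserve the slot in the root's inode xarray up front, so that the
	 * btrfs_add_inode_to_root() call further below should not fail with
	 * -ENOMEM.
	 */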
6651 ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
6652 if (ret)
6653 goto out;
6654 xa_reserved = true;
6655
6656 if (args->orphan) {
		/*
		 * O_TMPFILE: set the link count to 0 so that, from this point
		 * on, we fill in an inode item with the correct link count.
		 */
6661 set_nlink(inode, 0);
6662 } else {
6663 trace_btrfs_inode_request(dir);
6664
6665 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6666 if (ret)
6667 goto out;
6668 }
6669
6670 if (S_ISDIR(inode->i_mode))
6671 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6672
6673 BTRFS_I(inode)->generation = trans->transid;
6674 inode->i_generation = BTRFS_I(inode)->generation;
6675
6676 /*
6677 * We don't have any capability xattrs set here yet, shortcut any
6678 * queries for the xattrs here. If we add them later via the inode
6679 * security init path or any other path this flag will be cleared.
6680 */
6681 set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
6682
6683 /*
6684 * Subvolumes don't inherit flags from their parent directory.
6685 * Originally this was probably by accident, but we probably can't
6686 * change it now without compatibility issues.
6687 */
6688 if (!args->subvol)
6689 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6690
6691 btrfs_set_inode_mapping_order(BTRFS_I(inode));
6692 if (S_ISREG(inode->i_mode)) {
6693 if (btrfs_test_opt(fs_info, NODATASUM))
6694 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6695 if (btrfs_test_opt(fs_info, NODATACOW))
6696 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6697 BTRFS_INODE_NODATASUM;
6698 btrfs_update_inode_mapping_flags(BTRFS_I(inode));
6699 }
6700
6701 ret = btrfs_insert_inode_locked(inode);
6702 if (ret < 0) {
6703 if (!args->orphan)
6704 BTRFS_I(dir)->index_cnt--;
6705 goto out;
6706 }
6707
6708 /*
6709 * We could have gotten an inode number from somebody who was fsynced
6710 * and then removed in this same transaction, so let's just set full
6711 * sync since it will be a full sync anyway and this will blow away the
6712 * old info in the log.
6713 */
6714 btrfs_set_inode_full_sync(BTRFS_I(inode));
6715
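	/*
	 * Describe the batch of items we are about to insert: the inode item
	 * always, plus the inode ref for anything that is not an orphan.
	 */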
6716 key[0].objectid = objectid;
6717 key[0].type = BTRFS_INODE_ITEM_KEY;
6718 key[0].offset = 0;
6719
6720 sizes[0] = sizeof(struct btrfs_inode_item);
6721
6722 if (!args->orphan) {
6723 /*
6724 * Start new inodes with an inode_ref. This is slightly more
6725 * efficient for small numbers of hard links since they will
6726 * be packed into one item. Extended refs will kick in if we
6727 * add more hard links than can fit in the ref item.
6728 */
6729 key[1].objectid = objectid;
6730 key[1].type = BTRFS_INODE_REF_KEY;
6731 if (args->subvol) {
6732 key[1].offset = objectid;
6733 sizes[1] = 2 + sizeof(*ref);
6734 } else {
6735 key[1].offset = btrfs_ino(BTRFS_I(dir));
6736 sizes[1] = name->len + sizeof(*ref);
6737 }
6738 }
6739
6740 batch.keys = &key[0];
6741 batch.data_sizes = &sizes[0];
6742 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6743 batch.nr = args->orphan ? 1 : 2;
6744 ret = btrfs_insert_empty_items(trans, root, path, &batch);
6745 if (unlikely(ret != 0)) {
6746 btrfs_abort_transaction(trans, ret);
6747 goto discard;
6748 }
6749
6750 ts = simple_inode_init_ts(inode);
6751 BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
6752 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
6753
6754 /*
6755 * We're going to fill the inode item now, so at this point the inode
6756 * must be fully initialized.
6757 */
6758
6759 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6760 struct btrfs_inode_item);
6761 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6762 sizeof(*inode_item));
6763 fill_inode_item(trans, path->nodes[0], inode_item, inode);
6764
6765 if (!args->orphan) {
6766 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6767 struct btrfs_inode_ref);
6768 ptr = (unsigned long)(ref + 1);
6769 if (args->subvol) {
6770 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6771 btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6772 write_extent_buffer(path->nodes[0], "..", ptr, 2);
6773 } else {
6774 btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6775 name->len);
6776 btrfs_set_inode_ref_index(path->nodes[0], ref,
6777 BTRFS_I(inode)->dir_index);
6778 write_extent_buffer(path->nodes[0], name->name, ptr,
6779 name->len);
6780 }
6781 }
6782
	/*
	 * We don't need the path anymore. Moreover, inheriting properties,
	 * adding ACLs or security xattrs, adding an orphan item, or adding
	 * the link will each allocate yet another path. So free ours now.
	 */
6788 btrfs_free_path(path);
6789 path = NULL;
6790
6791 if (args->subvol) {
6792 struct btrfs_inode *parent;
6793
6794 /*
6795 * Subvolumes inherit properties from their parent subvolume,
6796 * not the directory they were created in.
6797 */
6798 parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
6799 if (IS_ERR(parent)) {
6800 ret = PTR_ERR(parent);
6801 } else {
6802 ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
6803 parent);
6804 iput(&parent->vfs_inode);
6805 }
6806 } else {
6807 ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
6808 BTRFS_I(dir));
6809 }
6810 if (ret) {
6811 btrfs_err(fs_info,
6812 "error inheriting props for ino %llu (root %llu): %d",
6813 btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
6814 }
6815
6816 /*
6817 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6818 * probably a bug.
6819 */
6820 if (!args->subvol) {
6821 ret = btrfs_init_inode_security(trans, args);
6822 if (unlikely(ret)) {
6823 btrfs_abort_transaction(trans, ret);
6824 goto discard;
6825 }
6826 }
6827
6828 ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
6829 if (WARN_ON(ret)) {
6830 /* Shouldn't happen, we used xa_reserve() before. */
6831 btrfs_abort_transaction(trans, ret);
6832 goto discard;
6833 }
6834
6835 trace_btrfs_inode_new(inode);
6836 btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6837
6838 btrfs_update_root_times(trans, root);
6839
6840 if (args->orphan) {
6841 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6842 if (unlikely(ret)) {
6843 btrfs_abort_transaction(trans, ret);
6844 goto discard;
6845 }
6846 } else {
6847 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6848 0, BTRFS_I(inode)->dir_index);
6849 if (unlikely(ret)) {
6850 btrfs_abort_transaction(trans, ret);
6851 goto discard;
6852 }
6853 }
6854
6855 return 0;
6856
6857 discard:
6858 /*
6859 * discard_new_inode() calls iput(), but the caller owns the reference
6860 * to the inode.
6861 */
6862 ihold(inode);
6863 discard_new_inode(inode);
6864 out:
6865 if (xa_reserved)
6866 xa_release(&root->inodes, objectid);
6867
6868 btrfs_free_path(path);
6869 return ret;
6870 }
6871
/*
 * Utility function to add 'inode' into 'parent_inode' with a given name and
 * a given sequence number.
 * If 'add_backref' is true, also insert a backref from the inode to the
 * parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, bool add_backref, u64 index)
6881 {
6882 int ret = 0;
6883 struct btrfs_key key;
6884 struct btrfs_root *root = parent_inode->root;
6885 u64 ino = btrfs_ino(inode);
6886 u64 parent_ino = btrfs_ino(parent_inode);
6887
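	/*
	 * For a subvolume, the directory entry points at the subvolume's root
	 * key instead of an inode item in this tree.
	 */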
6888 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6889 memcpy(&key, &inode->root->root_key, sizeof(key));
6890 } else {
6891 key.objectid = ino;
6892 key.type = BTRFS_INODE_ITEM_KEY;
6893 key.offset = 0;
6894 }
6895
6896 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6897 ret = btrfs_add_root_ref(trans, key.objectid,
6898 btrfs_root_id(root), parent_ino,
6899 index, name);
6900 } else if (add_backref) {
6901 ret = btrfs_insert_inode_ref(trans, root, name,
6902 ino, parent_ino, index);
6903 }
6904
6905 /* Nothing to clean up yet */
6906 if (ret)
6907 return ret;
6908
6909 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
6910 btrfs_inode_type(inode), index);
6911 if (ret == -EEXIST || ret == -EOVERFLOW)
6912 goto fail_dir_item;
6913 else if (unlikely(ret)) {
6914 btrfs_abort_transaction(trans, ret);
6915 return ret;
6916 }
6917
6918 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6919 name->len * 2);
6920 inode_inc_iversion(&parent_inode->vfs_inode);
6921 update_time_after_link_or_unlink(parent_inode);
6922
6923 ret = btrfs_update_inode(trans, parent_inode);
6924 if (ret)
6925 btrfs_abort_transaction(trans, ret);
6926 return ret;
6927
6928 fail_dir_item:
6929 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6930 u64 local_index;
6931 int ret2;
6932
6933 ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root),
6934 parent_ino, &local_index, name);
6935 if (ret2)
6936 btrfs_abort_transaction(trans, ret2);
6937 } else if (add_backref) {
6938 int ret2;
6939
6940 ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL);
6941 if (ret2)
6942 btrfs_abort_transaction(trans, ret2);
6943 }
6944
6945 /* Return the original error code */
6946 return ret;
6947 }
6948
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
			       struct inode *inode)
6951 {
6952 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6953 struct btrfs_root *root = BTRFS_I(dir)->root;
6954 struct btrfs_new_inode_args new_inode_args = {
6955 .dir = dir,
6956 .dentry = dentry,
6957 .inode = inode,
6958 };
6959 unsigned int trans_num_items;
6960 struct btrfs_trans_handle *trans;
6961 int ret;
6962
6963 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6964 if (ret)
6965 goto out_inode;
6966
6967 trans = btrfs_start_transaction(root, trans_num_items);
6968 if (IS_ERR(trans)) {
6969 ret = PTR_ERR(trans);
6970 goto out_new_inode_args;
6971 }
6972
6973 ret = btrfs_create_new_inode(trans, &new_inode_args);
6974 if (!ret) {
6975 if (S_ISDIR(inode->i_mode))
6976 inode->i_opflags |= IOP_FASTPERM_MAY_EXEC;
6977 d_instantiate_new(dentry, inode);
6978 }
6979
6980 btrfs_end_transaction(trans);
6981 btrfs_btree_balance_dirty(fs_info);
6982 out_new_inode_args:
6983 btrfs_new_inode_args_destroy(&new_inode_args);
6984 out_inode:
6985 if (ret)
6986 iput(inode);
6987 return ret;
6988 }
6989
static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
6992 {
6993 struct inode *inode;
6994
6995 inode = new_inode(dir->i_sb);
6996 if (!inode)
6997 return -ENOMEM;
6998 inode_init_owner(idmap, inode, dir, mode);
6999 inode->i_op = &btrfs_special_inode_operations;
7000 init_special_inode(inode, inode->i_mode, rdev);
7001 return btrfs_create_common(dir, dentry, inode);
7002 }
7003
static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
7006 {
7007 struct inode *inode;
7008
7009 inode = new_inode(dir->i_sb);
7010 if (!inode)
7011 return -ENOMEM;
7012 inode_init_owner(idmap, inode, dir, mode);
7013 inode->i_fop = &btrfs_file_operations;
7014 inode->i_op = &btrfs_file_inode_operations;
7015 inode->i_mapping->a_ops = &btrfs_aops;
7016 return btrfs_create_common(dir, dentry, inode);
7017 }
7018
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
7021 {
7022 struct btrfs_trans_handle *trans = NULL;
7023 struct btrfs_root *root = BTRFS_I(dir)->root;
7024 struct inode *inode = d_inode(old_dentry);
7025 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7026 struct fscrypt_name fname;
7027 u64 index;
7028 int ret;
7029
	/* Do not allow hard links across subvolumes of the same filesystem. */
7031 if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
7032 return -EXDEV;
7033
7034 if (inode->i_nlink >= BTRFS_LINK_MAX)
7035 return -EMLINK;
7036
7037 ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
7038 if (ret)
7039 goto fail;
7040
7041 ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
7042 if (ret)
7043 goto fail;
7044
7045 /*
7046 * 2 items for inode and inode ref
7047 * 2 items for dir items
7048 * 1 item for parent inode
7049 * 1 item for orphan item deletion if O_TMPFILE
7050 */
7051 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
7052 if (IS_ERR(trans)) {
7053 ret = PTR_ERR(trans);
7054 trans = NULL;
7055 goto fail;
7056 }
7057
7058 /* There are several dir indexes for this inode, clear the cache. */
7059 BTRFS_I(inode)->dir_index = 0ULL;
7060 inode_inc_iversion(inode);
7061 inode_set_ctime_current(inode);
7062
7063 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
7064 &fname.disk_name, 1, index);
7065 if (ret)
7066 goto fail;
7067
7068 /* Link added now we update the inode item with the new link count. */
7069 inc_nlink(inode);
7070 ret = btrfs_update_inode(trans, BTRFS_I(inode));
7071 if (unlikely(ret)) {
7072 btrfs_abort_transaction(trans, ret);
7073 goto fail;
7074 }
7075
7076 if (inode->i_nlink == 1) {
7077 /*
7078 * If the new hard link count is 1, it's a file created with the
7079 * open(2) O_TMPFILE flag.
7080 */
7081 ret = btrfs_orphan_del(trans, BTRFS_I(inode));
7082 if (unlikely(ret)) {
7083 btrfs_abort_transaction(trans, ret);
7084 goto fail;
7085 }
7086 }
7087
7088 /* Grab reference for the new dentry passed to d_instantiate(). */
7089 ihold(inode);
7090 d_instantiate(dentry, inode);
7091 btrfs_log_new_name(trans, old_dentry, NULL, 0, dentry->d_parent);
7092
7093 fail:
7094 fscrypt_free_filename(&fname);
7095 if (trans)
7096 btrfs_end_transaction(trans);
7097 btrfs_btree_balance_dirty(fs_info);
7098 return ret;
7099 }
7100
static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				  struct dentry *dentry, umode_t mode)
7103 {
7104 struct inode *inode;
7105
7106 inode = new_inode(dir->i_sb);
7107 if (!inode)
7108 return ERR_PTR(-ENOMEM);
7109 inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
7110 inode->i_op = &btrfs_dir_inode_operations;
7111 inode->i_fop = &btrfs_dir_file_operations;
7112 return ERR_PTR(btrfs_create_common(dir, dentry, inode));
7113 }
7114
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct folio *folio,
				      struct btrfs_file_extent_item *item)
7118 {
7119 int ret;
7120 struct extent_buffer *leaf = path->nodes[0];
7121 const u32 blocksize = leaf->fs_info->sectorsize;
7122 char *tmp;
7123 size_t max_size;
7124 unsigned long inline_size;
7125 unsigned long ptr;
7126 int compress_type;
7127
7128 compress_type = btrfs_file_extent_compression(leaf, item);
7129 max_size = btrfs_file_extent_ram_bytes(leaf, item);
7130 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
7131 tmp = kmalloc(inline_size, GFP_NOFS);
7132 if (!tmp)
7133 return -ENOMEM;
7134 ptr = btrfs_file_extent_inline_start(item);
7135
7136 read_extent_buffer(leaf, tmp, ptr, inline_size);
7137
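	/*
	 * An inline extent never covers more than one block, so clamp the
	 * decompressed size to the block size.
	 */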
7138 max_size = min_t(unsigned long, blocksize, max_size);
7139 ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
7140 max_size);
7141
	/*
	 * The decompression code contains a memset to fill in any space
	 * between the end of the uncompressed data and the end of max_size,
	 * in case the decompressed data ends up shorter than ram_bytes. That
	 * doesn't cover the hole between the end of an inline extent and the
	 * beginning of the next block, so we cover that region here.
	 */
7149
7150 if (max_size < blocksize)
7151 folio_zero_range(folio, max_size, blocksize - max_size);
7152 kfree(tmp);
7153 return ret;
7154 }
7155
static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
7157 {
7158 const u32 blocksize = path->nodes[0]->fs_info->sectorsize;
7159 struct btrfs_file_extent_item *fi;
7160 void *kaddr;
7161 size_t copy_size;
7162
7163 if (!folio || folio_test_uptodate(folio))
7164 return 0;
7165
7166 ASSERT(folio_pos(folio) == 0);
7167
7168 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
7169 struct btrfs_file_extent_item);
7170 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
7171 return uncompress_inline(path, folio, fi);
7172
7173 copy_size = min_t(u64, blocksize,
7174 btrfs_file_extent_ram_bytes(path->nodes[0], fi));
7175 kaddr = kmap_local_folio(folio, 0);
7176 read_extent_buffer(path->nodes[0], kaddr,
7177 btrfs_file_extent_inline_start(fi), copy_size);
7178 kunmap_local(kaddr);
7179 if (copy_size < blocksize)
7180 folio_zero_range(folio, copy_size, blocksize - copy_size);
7181 return 0;
7182 }
7183
7184 /*
7185 * Lookup the first extent overlapping a range in a file.
7186 *
7187 * @inode: file to search in
 * @folio: folio to read extent data into if the extent is inline
7189 * @start: file offset
7190 * @len: length of range starting at @start
7191 *
7192 * Return the first &struct extent_map which overlaps the given range, reading
7193 * it from the B-tree and caching it if necessary. Note that there may be more
7194 * extents which overlap the given range after the returned extent_map.
7195 *
 * If @folio is not NULL and the extent is inline, this also reads the extent
 * data directly into the folio and marks the extent up to date in the io_tree.
7198 *
7199 * Return: ERR_PTR on error, non-NULL extent_map on success.
7200 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct folio *folio, u64 start, u64 len)
7203 {
7204 struct btrfs_fs_info *fs_info = inode->root->fs_info;
7205 int ret = 0;
7206 u64 extent_start = 0;
7207 u64 extent_end = 0;
7208 u64 objectid = btrfs_ino(inode);
7209 int extent_type = -1;
7210 struct btrfs_path *path = NULL;
7211 struct btrfs_root *root = inode->root;
7212 struct btrfs_file_extent_item *item;
7213 struct extent_buffer *leaf;
7214 struct btrfs_key found_key;
7215 struct extent_map *em = NULL;
7216 struct extent_map_tree *em_tree = &inode->extent_tree;
7217
7218 read_lock(&em_tree->lock);
7219 em = btrfs_lookup_extent_mapping(em_tree, start, len);
7220 read_unlock(&em_tree->lock);
7221
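	/*
	 * A cached extent map is only reusable if it overlaps @start and, for
	 * inline extents, only if we don't have to copy data into a folio.
	 */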
7222 if (em) {
7223 if (em->start > start || btrfs_extent_map_end(em) <= start)
7224 btrfs_free_extent_map(em);
7225 else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
7226 btrfs_free_extent_map(em);
7227 else
7228 goto out;
7229 }
7230 em = btrfs_alloc_extent_map();
7231 if (!em) {
7232 ret = -ENOMEM;
7233 goto out;
7234 }
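	/* Start with a hole covering the whole file until we know better. */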
7235 em->start = EXTENT_MAP_HOLE;
7236 em->disk_bytenr = EXTENT_MAP_HOLE;
7237 em->len = (u64)-1;
7238
7239 path = btrfs_alloc_path();
7240 if (!path) {
7241 ret = -ENOMEM;
7242 goto out;
7243 }
7244
7245 /* Chances are we'll be called again, so go ahead and do readahead */
7246 path->reada = READA_FORWARD;
7247
7248 /*
7249 * The same explanation in load_free_space_cache applies here as well,
7250 * we only read when we're loading the free space cache, and at that
7251 * point the commit_root has everything we need.
7252 */
7253 if (btrfs_is_free_space_inode(inode)) {
7254 path->search_commit_root = true;
7255 path->skip_locking = true;
7256 }
7257
7258 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
7259 if (ret < 0) {
7260 goto out;
7261 } else if (ret > 0) {
7262 if (path->slots[0] == 0)
7263 goto not_found;
7264 path->slots[0]--;
7265 ret = 0;
7266 }
7267
7268 leaf = path->nodes[0];
7269 item = btrfs_item_ptr(leaf, path->slots[0],
7270 struct btrfs_file_extent_item);
7271 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7272 if (found_key.objectid != objectid ||
7273 found_key.type != BTRFS_EXTENT_DATA_KEY) {
7274 /*
7275 * If we backup past the first extent we want to move forward
7276 * and see if there is an extent in front of us, otherwise we'll
7277 * say there is a hole for our whole search range which can
7278 * cause problems.
7279 */
7280 extent_end = start;
7281 goto next;
7282 }
7283
7284 extent_type = btrfs_file_extent_type(leaf, item);
7285 extent_start = found_key.offset;
7286 extent_end = btrfs_file_extent_end(path);
7287 if (extent_type == BTRFS_FILE_EXTENT_REG ||
7288 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7289 /* Only regular file could have regular/prealloc extent */
7290 if (unlikely(!S_ISREG(inode->vfs_inode.i_mode))) {
7291 ret = -EUCLEAN;
7292 btrfs_crit(fs_info,
7293 "regular/prealloc extent found for non-regular inode %llu",
7294 btrfs_ino(inode));
7295 goto out;
7296 }
7297 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
7298 extent_start);
7299 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7300 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
7301 path->slots[0],
7302 extent_start);
7303 }
7304 next:
7305 if (start >= extent_end) {
7306 path->slots[0]++;
7307 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
7308 ret = btrfs_next_leaf(root, path);
7309 if (ret < 0)
7310 goto out;
7311 else if (ret > 0)
7312 goto not_found;
7313
7314 leaf = path->nodes[0];
7315 }
7316 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7317 if (found_key.objectid != objectid ||
7318 found_key.type != BTRFS_EXTENT_DATA_KEY)
7319 goto not_found;
7320 if (start + len <= found_key.offset)
7321 goto not_found;
7322 if (start > found_key.offset)
7323 goto next;
7324
7325 /* New extent overlaps with existing one */
7326 em->start = start;
7327 em->len = found_key.offset - start;
7328 em->disk_bytenr = EXTENT_MAP_HOLE;
7329 goto insert;
7330 }
7331
7332 btrfs_extent_item_to_extent_map(inode, path, item, em);
7333
7334 if (extent_type == BTRFS_FILE_EXTENT_REG ||
7335 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7336 goto insert;
7337 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7338 /*
7339 * Inline extent can only exist at file offset 0. This is
7340 * ensured by tree-checker and inline extent creation path.
7341 * Thus all members representing file offsets should be zero.
7342 */
7343 ASSERT(extent_start == 0);
7344 ASSERT(em->start == 0);
7345
7346 /*
7347 * btrfs_extent_item_to_extent_map() should have properly
7348 * initialized em members already.
7349 *
7350 * Other members are not utilized for inline extents.
7351 */
7352 ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
7353 ASSERT(em->len == fs_info->sectorsize);
7354
7355 ret = read_inline_extent(path, folio);
7356 if (ret < 0)
7357 goto out;
7358 goto insert;
7359 }
7360 not_found:
7361 em->start = start;
7362 em->len = len;
7363 em->disk_bytenr = EXTENT_MAP_HOLE;
7364 insert:
7365 ret = 0;
7366 btrfs_release_path(path);
7367 if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) {
7368 btrfs_err(fs_info,
7369 "bad extent! em: [%llu %llu] passed [%llu %llu]",
7370 em->start, em->len, start, len);
7371 ret = -EIO;
7372 goto out;
7373 }
7374
7375 write_lock(&em_tree->lock);
7376 ret = btrfs_add_extent_mapping(inode, &em, start, len);
7377 write_unlock(&em_tree->lock);
7378 out:
7379 btrfs_free_path(path);
7380
7381 trace_btrfs_get_extent(root, inode, em);
7382
7383 if (ret) {
7384 btrfs_free_extent_map(em);
7385 return ERR_PTR(ret);
7386 }
7387 return em;
7388 }
7389
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7391 {
7392 struct btrfs_block_group *block_group;
7393 bool readonly = false;
7394
7395 block_group = btrfs_lookup_block_group(fs_info, bytenr);
7396 if (!block_group || block_group->ro)
7397 readonly = true;
7398 if (block_group)
7399 btrfs_put_block_group(block_group);
7400 return readonly;
7401 }
7402
7403 /*
7404 * Check if we can do nocow write into the range [@offset, @offset + @len)
7405 *
7406 * @offset: File offset
7407 * @len: The length to write, will be updated to the nocow writeable
7408 * range
 * @file_extent: (optional) If not NULL, return the details of the file
 *               extent that allows the nocow write
7412 *
7413 * Return:
7414 * >0 and update @len if we can do nocow write
7415 * 0 if we can't do nocow write
7416 * <0 if error happened
7417 *
 * NOTE: This only checks the file extents; the caller is responsible for
 * waiting for any ordered extents.
7420 */
noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
			      struct btrfs_file_extent *file_extent,
			      bool nowait)
7424 {
7425 struct btrfs_root *root = inode->root;
7426 struct btrfs_fs_info *fs_info = root->fs_info;
7427 struct can_nocow_file_extent_args nocow_args = { 0 };
7428 BTRFS_PATH_AUTO_FREE(path);
7429 int ret;
7430 struct extent_buffer *leaf;
7431 struct extent_io_tree *io_tree = &inode->io_tree;
7432 struct btrfs_file_extent_item *fi;
7433 struct btrfs_key key;
7434 int found_type;
7435
7436 path = btrfs_alloc_path();
7437 if (!path)
7438 return -ENOMEM;
7439 path->nowait = nowait;
7440
7441 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7442 offset, 0);
7443 if (ret < 0)
7444 return ret;
7445
7446 if (ret == 1) {
7447 if (path->slots[0] == 0) {
7448 /* Can't find the item, must COW. */
7449 return 0;
7450 }
7451 path->slots[0]--;
7452 }
7453 ret = 0;
7454 leaf = path->nodes[0];
7455 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7456 if (key.objectid != btrfs_ino(inode) ||
7457 key.type != BTRFS_EXTENT_DATA_KEY) {
7458 /* Not our file or wrong item type, must COW. */
7459 return 0;
7460 }
7461
7462 if (key.offset > offset) {
7463 /* Wrong offset, must COW. */
7464 return 0;
7465 }
7466
7467 if (btrfs_file_extent_end(path) <= offset)
7468 return 0;
7469
7470 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7471 found_type = btrfs_file_extent_type(leaf, fi);
7472
7473 nocow_args.start = offset;
7474 nocow_args.end = offset + *len - 1;
7475 nocow_args.free_path = true;
7476
7477 ret = can_nocow_file_extent(path, &key, inode, &nocow_args);
7478 /* can_nocow_file_extent() has freed the path. */
7479 path = NULL;
7480
7481 if (ret != 1) {
7482 /* Treat errors as not being able to NOCOW. */
7483 return 0;
7484 }
7485
7486 if (btrfs_extent_readonly(fs_info,
7487 nocow_args.file_extent.disk_bytenr +
7488 nocow_args.file_extent.offset))
7489 return 0;
7490
7491 if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
7492 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7493 u64 range_end;
7494
7495 range_end = round_up(offset + nocow_args.file_extent.num_bytes,
7496 root->fs_info->sectorsize) - 1;
7497 ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
7498 EXTENT_DELALLOC);
7499 if (ret)
7500 return -EAGAIN;
7501 }
7502
7503 if (file_extent)
7504 memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
7505
7506 *len = nocow_args.file_extent.num_bytes;
7507
7508 return 1;
7509 }
7510
/* Callers of this function must hold the extent range locked (btrfs_lock_extent()). */
struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
				      const struct btrfs_file_extent *file_extent,
				      int type)
7515 {
7516 struct extent_map *em;
7517 int ret;
7518
	/*
	 * Note the missing NOCOW type.
	 *
	 * For pure NOCOW writes, we should not create an io extent map, but
	 * just reuse the existing one.
	 * Only PREALLOC writes (NOCOW writes into a preallocated range) can
	 * create an io extent map.
	 */
7527 ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7528 type == BTRFS_ORDERED_COMPRESSED ||
7529 type == BTRFS_ORDERED_REGULAR);
7530
7531 switch (type) {
7532 case BTRFS_ORDERED_PREALLOC:
		/* We're only referring to part of a larger preallocated extent. */
7534 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7535 break;
7536 case BTRFS_ORDERED_REGULAR:
		/* COW results in a new extent matching our file extent size. */
7538 ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
7539 ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
7540
7541 /* Since it's a new extent, we should not have any offset. */
7542 ASSERT(file_extent->offset == 0);
7543 break;
7544 case BTRFS_ORDERED_COMPRESSED:
7545 /* Must be compressed. */
7546 ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
7547
		/*
		 * An encoded write can make us refer to only part of the
		 * uncompressed extent.
		 */
7552 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7553 break;
7554 }
7555
7556 em = btrfs_alloc_extent_map();
7557 if (!em)
7558 return ERR_PTR(-ENOMEM);
7559
7560 em->start = start;
7561 em->len = file_extent->num_bytes;
7562 em->disk_bytenr = file_extent->disk_bytenr;
7563 em->disk_num_bytes = file_extent->disk_num_bytes;
7564 em->ram_bytes = file_extent->ram_bytes;
7565 em->generation = -1;
7566 em->offset = file_extent->offset;
7567 em->flags |= EXTENT_FLAG_PINNED;
7568 if (type == BTRFS_ORDERED_COMPRESSED)
7569 btrfs_extent_map_set_compression(em, file_extent->compression);
7570
7571 ret = btrfs_replace_extent_map_range(inode, em, true);
7572 if (ret) {
7573 btrfs_free_extent_map(em);
7574 return ERR_PTR(ret);
7575 }
7576
	/* The em got 2 refs now, the caller needs to call btrfs_free_extent_map() once. */
7578 return em;
7579 }
7580
/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet
 * released. If we continue to release/invalidate the folio, we could cause
 * a use-after-free on the subpage spinlock. So this function spins until the
 * subpage spinlock is released.
 */
static void wait_subpage_spinlock(struct folio *folio)
7589 {
7590 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
7591 struct btrfs_folio_state *bfs;
7592
7593 if (!btrfs_is_subpage(fs_info, folio))
7594 return;
7595
7596 ASSERT(folio_test_private(folio) && folio_get_private(folio));
7597 bfs = folio_get_private(folio);
7598
	/*
	 * This may look insane, as we just acquire the spinlock and release
	 * it without doing anything. But we just want to make sure no one is
	 * still holding the subpage spinlock.
	 * Since the folio is neither dirty nor under writeback, and we hold
	 * the folio lock, the only possible way for anyone to hold the
	 * spinlock is from the endio function clearing folio writeback.
	 *
	 * Here we just acquire the spinlock so that all existing callers
	 * should exit and we're safe to release/invalidate the folio.
	 */
7610 spin_lock_irq(&bfs->lock);
7611 spin_unlock_irq(&bfs->lock);
7612 }
7613
static int btrfs_launder_folio(struct folio *folio)
7615 {
7616 return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
7617 folio_size(folio), NULL);
7618 }
7619
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7621 {
7622 if (try_release_extent_mapping(folio, gfp_flags)) {
7623 wait_subpage_spinlock(folio);
7624 clear_folio_extent_mapped(folio);
7625 return true;
7626 }
7627 return false;
7628 }
7629
static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7631 {
7632 if (folio_test_writeback(folio) || folio_test_dirty(folio))
7633 return false;
7634 return __btrfs_release_folio(folio, gfp_flags);
7635 }
7636
7637 #ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
			       struct folio *dst, struct folio *src,
			       enum migrate_mode mode)
7641 {
7642 int ret = filemap_migrate_folio(mapping, dst, src, mode);
7643
7644 if (ret)
7645 return ret;
7646
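	/*
	 * The Ordered flag tracks pending ordered extent accounting for the
	 * folio, so carry it over to the destination folio.
	 */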
7647 if (folio_test_ordered(src)) {
7648 folio_clear_ordered(src);
7649 folio_set_ordered(dst);
7650 }
7651
7652 return 0;
7653 }
7654 #else
7655 #define btrfs_migrate_folio NULL
7656 #endif
7657
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
7660 {
7661 struct btrfs_inode *inode = folio_to_inode(folio);
7662 struct btrfs_fs_info *fs_info = inode->root->fs_info;
7663 struct extent_io_tree *tree = &inode->io_tree;
7664 struct extent_state *cached_state = NULL;
7665 u64 page_start = folio_pos(folio);
7666 u64 page_end = page_start + folio_size(folio) - 1;
7667 u64 cur;
7668 int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING;
7669
	/*
	 * We have the folio locked, so no new ordered extent can be created
	 * on it, nor can any bio be submitted for it.
	 *
	 * But an already submitted bio can still complete on this folio.
	 * Furthermore, the endio function won't skip a folio that already had
	 * its Ordered flag cleared, so it's possible for endio and
	 * invalidate_folio to do the same ordered extent accounting twice on
	 * one folio.
	 *
	 * So here we wait for any submitted bios to finish, so that we won't
	 * do double ordered extent accounting on the same folio.
	 */
7683 folio_wait_writeback(folio);
7684 wait_subpage_spinlock(folio);
7685
7686 /*
7687 * For subpage case, we have call sites like
7688 * btrfs_punch_hole_lock_range() which passes range not aligned to
7689 * sectorsize.
7690 * If the range doesn't cover the full folio, we don't need to and
7691 * shouldn't clear page extent mapped, as folio->private can still
7692 * record subpage dirty bits for other part of the range.
7693 *
7694 * For cases that invalidate the full folio even the range doesn't
7695 * cover the full folio, like invalidating the last folio, we're
7696 * still safe to wait for ordered extent to finish.
7697 */
7698 if (!(offset == 0 && length == folio_size(folio))) {
7699 btrfs_release_folio(folio, GFP_NOFS);
7700 return;
7701 }
7702
7703 if (!inode_evicting)
7704 btrfs_lock_extent(tree, page_start, page_end, &cached_state);
7705
7706 cur = page_start;
7707 while (cur < page_end) {
7708 struct btrfs_ordered_extent *ordered;
7709 u64 range_end;
7710 u32 range_len;
7711 u32 extra_flags = 0;
7712
7713 ordered = btrfs_lookup_first_ordered_range(inode, cur,
7714 page_end + 1 - cur);
7715 if (!ordered) {
7716 range_end = page_end;
7717 /*
7718 * No ordered extent covering this range, we are safe
7719 * to delete all extent states in the range.
7720 */
7721 extra_flags = EXTENT_CLEAR_ALL_BITS;
7722 goto next;
7723 }
7724 if (ordered->file_offset > cur) {
7725 /*
7726 * There is a range between [cur, oe->file_offset) not
7727 * covered by any ordered extent.
7728 * We are safe to delete all extent states, and handle
7729 * the ordered extent in the next iteration.
7730 */
7731 range_end = ordered->file_offset - 1;
7732 extra_flags = EXTENT_CLEAR_ALL_BITS;
7733 goto next;
7734 }
7735
7736 range_end = min(ordered->file_offset + ordered->num_bytes - 1,
7737 page_end);
7738 ASSERT(range_end + 1 - cur < U32_MAX);
7739 range_len = range_end + 1 - cur;
7740 if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
7741 /*
7742 * If Ordered is cleared, it means endio has
7743 * already been executed for the range.
7744 * We can't delete the extent states as
7745 * btrfs_finish_ordered_io() may still use some of them.
7746 */
7747 goto next;
7748 }
7749 btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
7750
7751 /*
7752 * IO on this page will never be started, so we need to account
7753 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
7754 * here, must leave that up for the ordered extent completion.
7755 *
7756 * This will also unlock the range for incoming
7757 * btrfs_finish_ordered_io().
7758 */
7759 if (!inode_evicting)
7760 btrfs_clear_extent_bit(tree, cur, range_end,
7761 EXTENT_DELALLOC |
7762 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7763 EXTENT_DEFRAG, &cached_state);
7764
7765 spin_lock(&inode->ordered_tree_lock);
7766 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
7767 ordered->truncated_len = min(ordered->truncated_len,
7768 cur - ordered->file_offset);
7769 spin_unlock(&inode->ordered_tree_lock);
7770
7771 /*
7772 * If the ordered extent has finished, we're safe to delete all
7773 * the extent states of the range, otherwise
7774 * btrfs_finish_ordered_io() will get executed by endio for
7775 * other pages, so we can't delete extent states.
7776 */
7777 if (btrfs_dec_test_ordered_pending(inode, &ordered,
7778 cur, range_end + 1 - cur)) {
7779 btrfs_finish_ordered_io(ordered);
7780 /*
7781 * The ordered extent has finished, now we're again
7782 * safe to delete all extent states of the range.
7783 */
7784 extra_flags = EXTENT_CLEAR_ALL_BITS;
7785 }
7786 next:
7787 if (ordered)
7788 btrfs_put_ordered_extent(ordered);
7789 /*
7790 * Qgroup reserved space handler
7791 * Sector(s) here will be either:
7792 *
7793 * 1) Already written to disk or bio already finished
7794 * Then its QGROUP_RESERVED bit in io_tree is already cleared.
7795 * Qgroup will be handled by its qgroup_record then.
7796 * btrfs_qgroup_free_data() call will do nothing here.
7797 *
7798 * 2) Not written to disk yet
7799 * Then btrfs_qgroup_free_data() call will clear the
7800 * QGROUP_RESERVED bit of its io_tree, and free the qgroup
7801 * reserved data space.
7802 * Since the IO will never happen for this page.
7803 */
7804 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
7805 if (!inode_evicting)
7806 btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
7807 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
7808 EXTENT_DEFRAG | extra_flags,
7809 &cached_state);
7810 cur = range_end + 1;
7811 }
7812 /*
7813 * We have iterated through all ordered extents of the page, the page
7814 * should not have Ordered anymore, or the above iteration
7815 * did something wrong.
7816 */
7817 ASSERT(!folio_test_ordered(folio));
7818 btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
7819 if (!inode_evicting)
7820 __btrfs_release_folio(folio, GFP_NOFS);
7821 clear_folio_extent_mapped(folio);
7822 }
7823
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
7825 {
7826 struct btrfs_truncate_control control = {
7827 .inode = inode,
7828 .ino = btrfs_ino(inode),
7829 .min_type = BTRFS_EXTENT_DATA_KEY,
7830 .clear_extent_range = true,
7831 .new_size = inode->vfs_inode.i_size,
7832 };
7833 struct btrfs_root *root = inode->root;
7834 struct btrfs_fs_info *fs_info = root->fs_info;
7835 struct btrfs_block_rsv rsv;
7836 int ret;
7837 struct btrfs_trans_handle *trans;
7838 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
7839 const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize);
7840 const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
7841
7842 /* Our inode is locked and the i_size can't be changed concurrently. */
7843 btrfs_assert_inode_locked(inode);
7844
7845 if (!skip_writeback) {
7846 ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1);
7847 if (ret)
7848 return ret;
7849 }
7850
7851 /*
7852 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
7853 * things going on here:
7854 *
7855 * 1) We need to reserve space to update our inode.
7856 *
7857 * 2) We need to have something to cache all the space that is going to
7858 * be free'd up by the truncate operation, but also have some slack
7859 * space reserved in case it uses space during the truncate (thank you
7860 * very much snapshotting).
7861 *
7862 * And we need these to be separate. The fact is we can use a lot of
7863 * space doing the truncate, and we have no earthly idea how much space
7864 * we will use, so we need the truncate reservation to be separate so it
7865 * doesn't end up using space reserved for updating the inode. We also
7866 * need to be able to stop the transaction and start a new one, which
7867 * means we need to be able to update the inode several times, and we
7868 * have no idea of knowing how many times that will be, so we can't just
7869 * reserve 1 item for the entirety of the operation, so that has to be
7870 * done separately as well.
7871 *
7872 * So that leaves us with
7873 *
7874 * 1) rsv - for the truncate reservation, which we will steal from the
7875 * transaction reservation.
	 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for
	 *    updating the inode.
7878 */
7879 btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
7880 rsv.size = min_size;
7881 rsv.failfast = true;
7882
7883 /*
7884 * 1 for the truncate slack space
7885 * 1 for updating the inode.
7886 */
7887 trans = btrfs_start_transaction(root, 2);
7888 if (IS_ERR(trans)) {
7889 ret = PTR_ERR(trans);
7890 goto out;
7891 }
7892
7893 /* Migrate the slack space for the truncate to our reserve */
7894 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
7895 min_size, false);
	/*
	 * We reserved 2 metadata units when we started the transaction and
	 * min_size matches 1 unit, so this should never fail, but if it does,
	 * it's not critical: we just fail the truncation.
	 */
7901 if (WARN_ON(ret)) {
7902 btrfs_end_transaction(trans);
7903 goto out;
7904 }
7905
7906 trans->block_rsv = &rsv;
7907
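	/*
	 * Truncate in a loop: each pass may run out of reserved space
	 * (-ENOSPC) or be asked to reschedule (-EAGAIN), in which case we
	 * commit what we have so far and continue in a new transaction.
	 */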
7908 while (1) {
7909 struct extent_state *cached_state = NULL;
7910
7911 btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7912 /*
7913 * We want to drop from the next block forward in case this new
7914 * size is not block aligned since we will be keeping the last
7915 * block of the extent just the way it is.
7916 */
7917 btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
7918
7919 ret = btrfs_truncate_inode_items(trans, root, &control);
7920
7921 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
7922 btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
7923
7924 btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7925
7926 trans->block_rsv = &fs_info->trans_block_rsv;
7927 if (ret != -ENOSPC && ret != -EAGAIN)
7928 break;
7929
7930 ret = btrfs_update_inode(trans, inode);
7931 if (ret)
7932 break;
7933
7934 btrfs_end_transaction(trans);
7935 btrfs_btree_balance_dirty(fs_info);
7936
7937 trans = btrfs_start_transaction(root, 2);
7938 if (IS_ERR(trans)) {
7939 ret = PTR_ERR(trans);
7940 trans = NULL;
7941 break;
7942 }
7943
7944 btrfs_block_rsv_release(fs_info, &rsv, -1, NULL);
7945 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
7946 &rsv, min_size, false);
		/*
		 * We reserved 2 metadata units when we started the
		 * transaction and min_size matches 1 unit, so this should
		 * never fail, but if it does, it's not critical: we just
		 * fail the truncation.
		 */
7952 if (WARN_ON(ret))
7953 break;
7954
7955 trans->block_rsv = &rsv;
7956 }
7957
	/*
	 * We can't call btrfs_truncate_block() inside a transaction handle as
	 * we could deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK
	 * then we know we've truncated everything except the last little bit,
	 * so we can call btrfs_truncate_block() and then update disk_i_size.
	 */
7964 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
7965 btrfs_end_transaction(trans);
7966 btrfs_btree_balance_dirty(fs_info);
7967
7968 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size,
7969 inode->vfs_inode.i_size, (u64)-1);
7970 if (ret)
7971 goto out;
7972 trans = btrfs_start_transaction(root, 1);
7973 if (IS_ERR(trans)) {
7974 ret = PTR_ERR(trans);
7975 goto out;
7976 }
7977 btrfs_inode_safe_disk_i_size_write(inode, 0);
7978 }
7979
7980 if (trans) {
7981 int ret2;
7982
7983 trans->block_rsv = &fs_info->trans_block_rsv;
7984 ret2 = btrfs_update_inode(trans, inode);
7985 if (ret2 && !ret)
7986 ret = ret2;
7987
7988 ret2 = btrfs_end_transaction(trans);
7989 if (ret2 && !ret)
7990 ret = ret2;
7991 btrfs_btree_balance_dirty(fs_info);
7992 }
7993 out:
7994 btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
7995 /*
7996 * So if we truncate and then write and fsync we normally would just
7997 * write the extents that changed, which is a problem if we need to
7998 * first truncate that entire inode. So set this flag so we write out
7999 * all of the extents in the inode to the sync log so we're completely
8000 * safe.
8001 *
8002 * If no extents were dropped or trimmed we don't need to force the next
8003 * fsync to truncate all the inode's items from the log and re-log them
8004 * all. This means the truncate operation did not change the file size,
8005 * or changed it to a smaller size but there was only an implicit hole
8006 * between the old i_size and the new i_size, and there were no prealloc
8007 * extents beyond i_size to drop.
8008 */
8009 if (control.extents_found > 0)
8010 btrfs_set_inode_full_sync(inode);
8011
8012 return ret;
8013 }
8014
struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
				     struct inode *dir)
8017 {
8018 struct inode *inode;
8019
8020 inode = new_inode(dir->i_sb);
8021 if (inode) {
8022 /*
8023 * Subvolumes don't inherit the sgid bit or the parent's gid if
8024 * the parent's sgid bit is set. This is probably a bug.
8025 */
8026 inode_init_owner(idmap, inode, NULL,
8027 S_IFDIR | (~current_umask() & S_IRWXUGO));
8028 inode->i_op = &btrfs_dir_inode_operations;
8029 inode->i_fop = &btrfs_dir_file_operations;
8030 }
8031 return inode;
8032 }
8033
struct inode *btrfs_alloc_inode(struct super_block *sb)
8035 {
8036 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8037 struct btrfs_inode *ei;
8038 struct inode *inode;
8039
8040 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8041 if (!ei)
8042 return NULL;
8043
8044 ei->root = NULL;
8045 ei->generation = 0;
8046 ei->last_trans = 0;
8047 ei->last_sub_trans = 0;
8048 ei->logged_trans = 0;
8049 ei->delalloc_bytes = 0;
8050 /* new_delalloc_bytes and last_dir_index_offset are in a union. */
8051 ei->new_delalloc_bytes = 0;
8052 ei->defrag_bytes = 0;
8053 ei->disk_i_size = 0;
8054 ei->flags = 0;
8055 ei->ro_flags = 0;
8056 /*
8057 * ->index_cnt will be properly initialized later when creating a new
8058 * inode (btrfs_create_new_inode()) or when reading an existing inode
8059 * from disk (btrfs_read_locked_inode()).
8060 */
8061 ei->csum_bytes = 0;
8062 ei->dir_index = 0;
8063 ei->last_unlink_trans = 0;
8064 ei->last_reflink_trans = 0;
8065 ei->last_log_commit = 0;
8066
8067 spin_lock_init(&ei->lock);
8068 ei->outstanding_extents = 0;
8069 if (sb->s_magic != BTRFS_TEST_MAGIC)
8070 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8071 BTRFS_BLOCK_RSV_DELALLOC);
8072 ei->runtime_flags = 0;
8073 ei->prop_compress = BTRFS_COMPRESS_NONE;
8074 ei->defrag_compress = BTRFS_COMPRESS_NONE;
8075
8076 ei->delayed_node = NULL;
8077
8078 ei->i_otime_sec = 0;
8079 ei->i_otime_nsec = 0;
8080
8081 inode = &ei->vfs_inode;
8082 btrfs_extent_map_tree_init(&ei->extent_tree);
8083
8084 /* This io tree sets the valid inode. */
8085 btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
8086 ei->io_tree.inode = ei;
8087
8088 ei->file_extent_tree = NULL;
8089
8090 mutex_init(&ei->log_mutex);
8091 spin_lock_init(&ei->ordered_tree_lock);
8092 ei->ordered_tree = RB_ROOT;
8093 ei->ordered_tree_last = NULL;
8094 INIT_LIST_HEAD(&ei->delalloc_inodes);
8095 INIT_LIST_HEAD(&ei->delayed_iput);
8096 init_rwsem(&ei->i_mmap_lock);
8097
8098 return inode;
8099 }
8100
8101 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
8103 {
8104 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8105 kfree(BTRFS_I(inode)->file_extent_tree);
8106 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8107 }
8108 #endif
8109
void btrfs_free_inode(struct inode *inode)
8111 {
8112 kfree(BTRFS_I(inode)->file_extent_tree);
8113 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8114 }
8115
void btrfs_destroy_inode(struct inode *vfs_inode)
8117 {
8118 struct btrfs_ordered_extent *ordered;
8119 struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8120 struct btrfs_root *root = inode->root;
8121 bool freespace_inode;
8122
8123 WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8124 WARN_ON(vfs_inode->i_data.nrpages);
8125 WARN_ON(inode->block_rsv.reserved);
8126 WARN_ON(inode->block_rsv.size);
8127 WARN_ON(inode->outstanding_extents);
8128 if (!S_ISDIR(vfs_inode->i_mode)) {
8129 WARN_ON(inode->delalloc_bytes);
8130 WARN_ON(inode->new_delalloc_bytes);
8131 WARN_ON(inode->csum_bytes);
8132 }
8133 if (!root || !btrfs_is_data_reloc_root(root))
8134 WARN_ON(inode->defrag_bytes);
8135
	/*
	 * This can happen when we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
8141 if (!root)
8142 return;
8143
8144 /*
8145 * If this is a free space inode do not take the ordered extents lockdep
8146 * map.
8147 */
8148 freespace_inode = btrfs_is_free_space_inode(inode);
8149
8150 while (1) {
8151 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8152 if (!ordered)
8153 break;
8154 else {
8155 btrfs_err(root->fs_info,
8156 "found ordered extent %llu %llu on inode cleanup",
8157 ordered->file_offset, ordered->num_bytes);
8158
8159 if (!freespace_inode)
8160 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8161
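			/*
			 * Drop one ref for the lookup above and one for the
			 * ordered tree, since we just removed the extent
			 * from the tree.
			 */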
8162 btrfs_remove_ordered_extent(inode, ordered);
8163 btrfs_put_ordered_extent(ordered);
8164 btrfs_put_ordered_extent(ordered);
8165 }
8166 }
8167 btrfs_qgroup_check_reserved_leak(inode);
8168 btrfs_del_inode_from_root(inode);
8169 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8170 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8171 btrfs_put_root(inode->root);
8172 }
8173
int btrfs_drop_inode(struct inode *inode)
8175 {
8176 struct btrfs_root *root = BTRFS_I(inode)->root;
8177
8178 if (root == NULL)
8179 return 1;
8180
	/* The snapshot/subvolume tree is being deleted. */
8182 if (btrfs_root_refs(&root->root_item) == 0)
8183 return 1;
8184 else
8185 return inode_generic_drop(inode);
8186 }
8187
static void init_once(void *foo)
8189 {
8190 struct btrfs_inode *ei = foo;
8191
8192 inode_init_once(&ei->vfs_inode);
8193 }
8194
void __cold btrfs_destroy_cachep(void)
8196 {
8197 /*
8198 * Make sure all delayed rcu free inodes are flushed before we
8199 * destroy cache.
8200 */
8201 rcu_barrier();
8202 kmem_cache_destroy(btrfs_inode_cachep);
8203 }
8204
int __init btrfs_init_cachep(void)
8206 {
8207 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8208 sizeof(struct btrfs_inode), 0,
8209 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
8210 init_once);
8211 if (!btrfs_inode_cachep)
8212 return -ENOMEM;
8213
8214 return 0;
8215 }
8216
static int btrfs_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
8220 {
8221 u64 delalloc_bytes;
8222 u64 inode_bytes;
8223 struct inode *inode = d_inode(path->dentry);
8224 u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
8225 u32 bi_flags = BTRFS_I(inode)->flags;
8226 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8227
8228 stat->result_mask |= STATX_BTIME;
8229 stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
8230 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
8231 if (bi_flags & BTRFS_INODE_APPEND)
8232 stat->attributes |= STATX_ATTR_APPEND;
8233 if (bi_flags & BTRFS_INODE_COMPRESS)
8234 stat->attributes |= STATX_ATTR_COMPRESSED;
8235 if (bi_flags & BTRFS_INODE_IMMUTABLE)
8236 stat->attributes |= STATX_ATTR_IMMUTABLE;
8237 if (bi_flags & BTRFS_INODE_NODUMP)
8238 stat->attributes |= STATX_ATTR_NODUMP;
8239 if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8240 stat->attributes |= STATX_ATTR_VERITY;
8241
8242 stat->attributes_mask |= (STATX_ATTR_APPEND |
8243 STATX_ATTR_COMPRESSED |
8244 STATX_ATTR_IMMUTABLE |
8245 STATX_ATTR_NODUMP);
8246
8247 generic_fillattr(idmap, request_mask, inode, stat);
8248 stat->dev = BTRFS_I(inode)->root->anon_dev;
8249
8250 stat->subvol = btrfs_root_id(BTRFS_I(inode)->root);
8251 stat->result_mask |= STATX_SUBVOL;
8252
8253 spin_lock(&BTRFS_I(inode)->lock);
8254 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8255 inode_bytes = inode_get_bytes(inode);
8256 spin_unlock(&BTRFS_I(inode)->lock);
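	/*
	 * Account for not yet allocated delalloc bytes too, so that st_blocks
	 * doesn't underreport space consumed by buffered writes that have not
	 * been flushed yet.
	 */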
8257 stat->blocks = (ALIGN(inode_bytes, blocksize) +
8258 ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
8259 return 0;
8260 }
8261
static int btrfs_rename_exchange(struct inode *old_dir,
				 struct dentry *old_dentry,
				 struct inode *new_dir,
				 struct dentry *new_dentry)
8266 {
8267 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8268 struct btrfs_trans_handle *trans;
8269 unsigned int trans_num_items;
8270 struct btrfs_root *root = BTRFS_I(old_dir)->root;
8271 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8272 struct inode *new_inode = new_dentry->d_inode;
8273 struct inode *old_inode = old_dentry->d_inode;
8274 struct btrfs_rename_ctx old_rename_ctx;
8275 struct btrfs_rename_ctx new_rename_ctx;
8276 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8277 u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8278 u64 old_idx = 0;
8279 u64 new_idx = 0;
8280 int ret;
8281 int ret2;
8282 bool need_abort = false;
8283 bool logs_pinned = false;
8284 struct fscrypt_name old_fname, new_fname;
8285 struct fscrypt_str *old_name, *new_name;
8286
	/*
	 * For non-subvolumes, allow an exchange only within one subvolume, in
	 * the same inode namespace. Two subvolumes (each represented as a
	 * directory) can be exchanged as they're a logical link and have a
	 * fixed inode number.
	 */
8292 if (root != dest &&
8293 (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
8294 new_ino != BTRFS_FIRST_FREE_OBJECTID))
8295 return -EXDEV;
8296
8297 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8298 if (ret)
8299 return ret;
8300
8301 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8302 if (ret) {
8303 fscrypt_free_filename(&old_fname);
8304 return ret;
8305 }
8306
8307 old_name = &old_fname.disk_name;
8308 new_name = &new_fname.disk_name;
8309
8310 /* close the race window with snapshot create/destroy ioctl */
8311 if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8312 new_ino == BTRFS_FIRST_FREE_OBJECTID)
8313 down_read(&fs_info->subvol_sem);
8314
8315 /*
8316 * For each inode:
8317 * 1 to remove old dir item
8318 * 1 to remove old dir index
8319 * 1 to add new dir item
8320 * 1 to add new dir index
8321 * 1 to update parent inode
8322 *
8323 * If the parents are the same, we only need to account for one
8324 */
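	/* E.g. 2 inodes * 5 items = 10; a shared parent saves one update (9). */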
8325 trans_num_items = (old_dir == new_dir ? 9 : 10);
8326 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8327 /*
8328 * 1 to remove old root ref
8329 * 1 to remove old root backref
8330 * 1 to add new root ref
8331 * 1 to add new root backref
8332 */
8333 trans_num_items += 4;
8334 } else {
8335 /*
8336 * 1 to update inode item
8337 * 1 to remove old inode ref
8338 * 1 to add new inode ref
8339 */
8340 trans_num_items += 3;
8341 }
8342 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
8343 trans_num_items += 4;
8344 else
8345 trans_num_items += 3;
8346 trans = btrfs_start_transaction(root, trans_num_items);
8347 if (IS_ERR(trans)) {
8348 ret = PTR_ERR(trans);
8349 goto out_notrans;
8350 }
8351
8352 if (dest != root) {
8353 ret = btrfs_record_root_in_trans(trans, dest);
8354 if (ret)
8355 goto out_fail;
8356 }
8357
8358 /*
8359 * We need to find a free sequence number both in the source and
8360 * in the destination directory for the exchange.
8361 */
8362 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8363 if (ret)
8364 goto out_fail;
8365 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8366 if (ret)
8367 goto out_fail;
8368
8369 BTRFS_I(old_inode)->dir_index = 0ULL;
8370 BTRFS_I(new_inode)->dir_index = 0ULL;
8371
8372 /* Reference for the source. */
8373 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8374 /* force full log commit if subvolume involved. */
8375 btrfs_set_log_full_commit(trans);
8376 } else {
8377 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8378 btrfs_ino(BTRFS_I(new_dir)),
8379 old_idx);
8380 if (ret)
8381 goto out_fail;
8382 need_abort = true;
8383 }
8384
8385 /* And now for the dest. */
8386 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8387 /* force full log commit if subvolume involved. */
8388 btrfs_set_log_full_commit(trans);
8389 } else {
8390 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8391 btrfs_ino(BTRFS_I(old_dir)),
8392 new_idx);
8393 if (ret) {
8394 if (unlikely(need_abort))
8395 btrfs_abort_transaction(trans, ret);
8396 goto out_fail;
8397 }
8398 }
8399
8400 /* Update inode version and ctime/mtime. */
8401 inode_inc_iversion(old_dir);
8402 inode_inc_iversion(new_dir);
8403 inode_inc_iversion(old_inode);
8404 inode_inc_iversion(new_inode);
8405 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8406
8407 if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
8408 new_ino != BTRFS_FIRST_FREE_OBJECTID) {
8409 /*
8410 * If we are renaming in the same directory (and it's not for
8411 * root entries) pin the log early to prevent any concurrent
8412 * task from logging the directory after we removed the old
8413 * entries and before we add the new entries, otherwise that
8414 * task can sync a log without any entry for the inodes we are
8415 * renaming and therefore replaying that log, if a power failure
8416 * happens after syncing the log, would result in deleting the
8417 * inodes.
8418 *
8419 * If the rename affects two different directories, we want to
8420 * make sure that there's no log commit that contains
8421 * updates for only one of the directories but not for the
8422 * other.
8423 *
8424 * If we are renaming an entry for a root, we don't care about
8425 * log updates since we called btrfs_set_log_full_commit().
8426 */
8427 btrfs_pin_log_trans(root);
8428 btrfs_pin_log_trans(dest);
8429 logs_pinned = true;
8430 }
8431
8432 if (old_dentry->d_parent != new_dentry->d_parent) {
8433 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8434 BTRFS_I(old_inode), true);
8435 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8436 BTRFS_I(new_inode), true);
8437 }
8438
8439 /* src is a subvolume */
8440 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8441 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8442 if (unlikely(ret)) {
8443 btrfs_abort_transaction(trans, ret);
8444 goto out_fail;
8445 }
8446 } else { /* src is an inode */
8447 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8448 BTRFS_I(old_dentry->d_inode),
8449 old_name, &old_rename_ctx);
8450 if (unlikely(ret)) {
8451 btrfs_abort_transaction(trans, ret);
8452 goto out_fail;
8453 }
8454 ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8455 if (unlikely(ret)) {
8456 btrfs_abort_transaction(trans, ret);
8457 goto out_fail;
8458 }
8459 }
8460
8461 /* dest is a subvolume */
8462 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8463 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8464 if (unlikely(ret)) {
8465 btrfs_abort_transaction(trans, ret);
8466 goto out_fail;
8467 }
8468 } else { /* dest is an inode */
8469 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8470 BTRFS_I(new_dentry->d_inode),
8471 new_name, &new_rename_ctx);
8472 if (unlikely(ret)) {
8473 btrfs_abort_transaction(trans, ret);
8474 goto out_fail;
8475 }
8476 ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8477 if (unlikely(ret)) {
8478 btrfs_abort_transaction(trans, ret);
8479 goto out_fail;
8480 }
8481 }
8482
8483 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8484 new_name, 0, old_idx);
8485 if (unlikely(ret)) {
8486 btrfs_abort_transaction(trans, ret);
8487 goto out_fail;
8488 }
8489
8490 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8491 old_name, 0, new_idx);
8492 if (unlikely(ret)) {
8493 btrfs_abort_transaction(trans, ret);
8494 goto out_fail;
8495 }
8496
8497 if (old_inode->i_nlink == 1)
8498 BTRFS_I(old_inode)->dir_index = old_idx;
8499 if (new_inode->i_nlink == 1)
8500 BTRFS_I(new_inode)->dir_index = new_idx;
8501
8502 /*
8503 * Do the log updates for all inodes.
8504 *
8505 * If either entry is for a root we don't need to update the logs since
8506 * we've called btrfs_set_log_full_commit() before.
8507 */
8508 if (logs_pinned) {
8509 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8510 old_rename_ctx.index, new_dentry->d_parent);
8511 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
8512 new_rename_ctx.index, old_dentry->d_parent);
8513 }
8514
8515 out_fail:
8516 if (logs_pinned) {
8517 btrfs_end_log_trans(root);
8518 btrfs_end_log_trans(dest);
8519 }
8520 ret2 = btrfs_end_transaction(trans);
8521 ret = ret ? ret : ret2;
8522 out_notrans:
8523 if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
8524 old_ino == BTRFS_FIRST_FREE_OBJECTID)
8525 up_read(&fs_info->subvol_sem);
8526
8527 fscrypt_free_filename(&new_fname);
8528 fscrypt_free_filename(&old_fname);
8529 return ret;
8530 }
8531
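/*
 * A whiteout is a char device with device number 0 (WHITEOUT_DEV) and mode
 * WHITEOUT_MODE; RENAME_WHITEOUT (used e.g. by overlayfs) replaces the old
 * name with such an inode to mark it as deleted.
 */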
8532 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
8533 struct inode *dir)
8534 {
8535 struct inode *inode;
8536
8537 inode = new_inode(dir->i_sb);
8538 if (inode) {
8539 inode_init_owner(idmap, inode, dir,
8540 S_IFCHR | WHITEOUT_MODE);
8541 inode->i_op = &btrfs_special_inode_operations;
8542 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
8543 }
8544 return inode;
8545 }
8546
8547 static int btrfs_rename(struct mnt_idmap *idmap,
8548 struct inode *old_dir, struct dentry *old_dentry,
8549 struct inode *new_dir, struct dentry *new_dentry,
8550 unsigned int flags)
8551 {
8552 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8553 struct btrfs_new_inode_args whiteout_args = {
8554 .dir = old_dir,
8555 .dentry = old_dentry,
8556 };
8557 struct btrfs_trans_handle *trans;
8558 unsigned int trans_num_items;
8559 struct btrfs_root *root = BTRFS_I(old_dir)->root;
8560 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8561 struct inode *new_inode = d_inode(new_dentry);
8562 struct inode *old_inode = d_inode(old_dentry);
8563 struct btrfs_rename_ctx rename_ctx;
8564 u64 index = 0;
8565 int ret;
8566 int ret2;
8567 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8568 struct fscrypt_name old_fname, new_fname;
8569 bool logs_pinned = false;
8570
8571 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8572 return -EPERM;
8573
8574 /* we only allow rename subvolume link between subvolumes */
8575 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8576 return -EXDEV;
8577
8578 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8579 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
8580 return -ENOTEMPTY;
8581
8582 if (S_ISDIR(old_inode->i_mode) && new_inode &&
8583 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8584 return -ENOTEMPTY;
8585
8586 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8587 if (ret)
8588 return ret;
8589
8590 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8591 if (ret) {
8592 fscrypt_free_filename(&old_fname);
8593 return ret;
8594 }
8595
8596 /* check for collisions, even if the name isn't there */
8597 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
8598 if (ret) {
8599 if (ret == -EEXIST) {
8600 /* We shouldn't get -EEXIST without a new_inode. */
8602 if (WARN_ON(!new_inode))
8603 goto out_fscrypt_names;
8605 } else {
8606 /* maybe -EOVERFLOW */
8607 goto out_fscrypt_names;
8608 }
8609 }
8610 ret = 0;
8611
8612 /*
8613 * We're using rename to replace one file with another. Start IO on it
8614 * now so we don't add too much work to the end of the transaction.
8615 */
8616 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
8617 filemap_flush(old_inode->i_mapping);
8618
8619 if (flags & RENAME_WHITEOUT) {
8620 whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
8621 if (!whiteout_args.inode) {
8622 ret = -ENOMEM;
8623 goto out_fscrypt_names;
8624 }
8625 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
8626 if (ret)
8627 goto out_whiteout_inode;
8628 } else {
8629 /* 1 to update the old parent inode. */
8630 trans_num_items = 1;
8631 }
8632
8633 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8634 /* Close the race window with snapshot create/destroy ioctl */
8635 down_read(&fs_info->subvol_sem);
8636 /*
8637 * 1 to remove old root ref
8638 * 1 to remove old root backref
8639 * 1 to add new root ref
8640 * 1 to add new root backref
8641 */
8642 trans_num_items += 4;
8643 } else {
8644 /*
8645 * 1 to update inode
8646 * 1 to remove old inode ref
8647 * 1 to add new inode ref
8648 */
8649 trans_num_items += 3;
8650 }
8651 /*
8652 * 1 to remove old dir item
8653 * 1 to remove old dir index
8654 * 1 to add new dir item
8655 * 1 to add new dir index
8656 */
8657 trans_num_items += 4;
8658 /* 1 to update new parent inode if it's not the same as the old parent */
8659 if (new_dir != old_dir)
8660 trans_num_items++;
8661 if (new_inode) {
8662 /*
8663 * 1 to update inode
8664 * 1 to remove inode ref
8665 * 1 to remove dir item
8666 * 1 to remove dir index
8667 * 1 to possibly add orphan item
8668 */
8669 trans_num_items += 5;
8670 }
8671 trans = btrfs_start_transaction(root, trans_num_items);
8672 if (IS_ERR(trans)) {
8673 ret = PTR_ERR(trans);
8674 goto out_notrans;
8675 }
8676
8677 if (dest != root) {
8678 ret = btrfs_record_root_in_trans(trans, dest);
8679 if (ret)
8680 goto out_fail;
8681 }
8682
8683 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
8684 if (ret)
8685 goto out_fail;
8686
8687 BTRFS_I(old_inode)->dir_index = 0ULL;
8688 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8689 /* force full log commit if subvolume involved. */
8690 btrfs_set_log_full_commit(trans);
8691 } else {
8692 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
8693 old_ino, btrfs_ino(BTRFS_I(new_dir)),
8694 index);
8695 if (ret)
8696 goto out_fail;
8697 }
8698
8699 inode_inc_iversion(old_dir);
8700 inode_inc_iversion(new_dir);
8701 inode_inc_iversion(old_inode);
8702 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8703
8704 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
8705 /*
8706 * If we are renaming in the same directory (and it's not a
8707 * root entry) pin the log to prevent any concurrent task from
8708 * logging the directory after we removed the old entry and
8709 * before we add the new entry, otherwise that task can sync
8710 * a log without any entry for the inode we are renaming and
8711 * therefore replaying that log, if a power failure happens
8712 * after syncing the log, would result in deleting the inode.
8713 *
8714 * If the rename affects two different directories, we want to
8715 * make sure that there's no log commit that contains
8716 * updates for only one of the directories but not for the
8717 * other.
8718 *
8719 * If we are renaming an entry for a root, we don't care about
8720 * log updates since we called btrfs_set_log_full_commit().
8721 */
8722 btrfs_pin_log_trans(root);
8723 btrfs_pin_log_trans(dest);
8724 logs_pinned = true;
8725 }
8726
8727 if (old_dentry->d_parent != new_dentry->d_parent)
8728 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8729 BTRFS_I(old_inode), true);
8730
8731 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8732 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8733 if (unlikely(ret)) {
8734 btrfs_abort_transaction(trans, ret);
8735 goto out_fail;
8736 }
8737 } else {
8738 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8739 BTRFS_I(d_inode(old_dentry)),
8740 &old_fname.disk_name, &rename_ctx);
8741 if (unlikely(ret)) {
8742 btrfs_abort_transaction(trans, ret);
8743 goto out_fail;
8744 }
8745 ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8746 if (unlikely(ret)) {
8747 btrfs_abort_transaction(trans, ret);
8748 goto out_fail;
8749 }
8750 }
8751
8752 if (new_inode) {
8753 inode_inc_iversion(new_inode);
8754 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
8755 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8756 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8757 if (unlikely(ret)) {
8758 btrfs_abort_transaction(trans, ret);
8759 goto out_fail;
8760 }
8761 BUG_ON(new_inode->i_nlink == 0);
8762 } else {
8763 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8764 BTRFS_I(d_inode(new_dentry)),
8765 &new_fname.disk_name);
8766 if (unlikely(ret)) {
8767 btrfs_abort_transaction(trans, ret);
8768 goto out_fail;
8769 }
8770 }
8771 if (new_inode->i_nlink == 0) {
8772 ret = btrfs_orphan_add(trans,
8773 BTRFS_I(d_inode(new_dentry)));
8774 if (unlikely(ret)) {
8775 btrfs_abort_transaction(trans, ret);
8776 goto out_fail;
8777 }
8778 }
8779 }
8780
8781 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8782 &new_fname.disk_name, 0, index);
8783 if (unlikely(ret)) {
8784 btrfs_abort_transaction(trans, ret);
8785 goto out_fail;
8786 }
8787
8788 if (old_inode->i_nlink == 1)
8789 BTRFS_I(old_inode)->dir_index = index;
8790
8791 if (logs_pinned)
8792 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8793 rename_ctx.index, new_dentry->d_parent);
8794
8795 if (flags & RENAME_WHITEOUT) {
8796 ret = btrfs_create_new_inode(trans, &whiteout_args);
8797 if (unlikely(ret)) {
8798 btrfs_abort_transaction(trans, ret);
8799 goto out_fail;
8800 } else {
8801 unlock_new_inode(whiteout_args.inode);
8802 iput(whiteout_args.inode);
8803 whiteout_args.inode = NULL;
8804 }
8805 }
8806 out_fail:
8807 if (logs_pinned) {
8808 btrfs_end_log_trans(root);
8809 btrfs_end_log_trans(dest);
8810 }
8811 ret2 = btrfs_end_transaction(trans);
8812 ret = ret ? ret : ret2;
8813 out_notrans:
8814 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8815 up_read(&fs_info->subvol_sem);
8816 if (flags & RENAME_WHITEOUT)
8817 btrfs_new_inode_args_destroy(&whiteout_args);
8818 out_whiteout_inode:
8819 if (flags & RENAME_WHITEOUT)
8820 iput(whiteout_args.inode);
8821 out_fscrypt_names:
8822 fscrypt_free_filename(&old_fname);
8823 fscrypt_free_filename(&new_fname);
8824 return ret;
8825 }
8826
8827 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
8828 struct dentry *old_dentry, struct inode *new_dir,
8829 struct dentry *new_dentry, unsigned int flags)
8830 {
8831 int ret;
8832
8833 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
8834 return -EINVAL;
8835
8836 if (flags & RENAME_EXCHANGE)
8837 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
8838 new_dentry);
8839 else
8840 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
8841 new_dentry, flags);
8842
8843 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
8844
8845 return ret;
8846 }
8847
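/*
 * One unit of asynchronous delalloc flushing: queued on the fs-wide
 * flush_workers workqueue, it flushes one inode's dirty pages and then
 * signals @completion.
 */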
8848 struct btrfs_delalloc_work {
8849 struct inode *inode;
8850 struct completion completion;
8851 struct list_head list;
8852 struct btrfs_work work;
8853 };
8854
8855 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8856 {
8857 struct btrfs_delalloc_work *delalloc_work;
8858 struct inode *inode;
8859
8860 delalloc_work = container_of(work, struct btrfs_delalloc_work,
8861 work);
8862 inode = delalloc_work->inode;
8863 filemap_flush(inode->i_mapping);
8864 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8865 &BTRFS_I(inode)->runtime_flags))
8866 filemap_flush(inode->i_mapping);
8867
8868 iput(inode);
8869 complete(&delalloc_work->completion);
8870 }
8871
8872 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
8873 {
8874 struct btrfs_delalloc_work *work;
8875
8876 work = kmalloc_obj(*work, GFP_NOFS);
8877 if (!work)
8878 return NULL;
8879
8880 init_completion(&work->completion);
8881 INIT_LIST_HEAD(&work->list);
8882 work->inode = inode;
8883 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
8884
8885 return work;
8886 }
8887
8888 /*
8889 * Some fairly slow code that needs optimization: this walks the list
8890 * of all the inodes with pending delalloc and forces them to disk.
8891 */
8892 static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write,
8893 bool snapshot, bool in_reclaim_context)
8894 {
8895 struct btrfs_delalloc_work *work, *next;
8896 LIST_HEAD(works);
8897 LIST_HEAD(splice);
8898 int ret = 0;
8899
8900 mutex_lock(&root->delalloc_mutex);
8901 spin_lock(&root->delalloc_lock);
8902 list_splice_init(&root->delalloc_inodes, &splice);
8903 while (!list_empty(&splice)) {
8904 struct btrfs_inode *inode;
8905 struct inode *tmp_inode;
8906
8907 inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes);
8908
8909 list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
8910
8911 if (in_reclaim_context &&
8912 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags))
8913 continue;
8914
8915 tmp_inode = igrab(&inode->vfs_inode);
8916 if (!tmp_inode) {
8917 cond_resched_lock(&root->delalloc_lock);
8918 continue;
8919 }
8920 spin_unlock(&root->delalloc_lock);
8921
8922 if (snapshot)
8923 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags);
8924 if (nr_to_write == NULL) {
8925 work = btrfs_alloc_delalloc_work(tmp_inode);
8926 if (!work) {
8927 iput(tmp_inode);
8928 ret = -ENOMEM;
8929 goto out;
8930 }
8931 list_add_tail(&work->list, &works);
8932 btrfs_queue_work(root->fs_info->flush_workers,
8933 &work->work);
8934 } else {
8935 ret = filemap_flush_nr(tmp_inode->i_mapping,
8936 nr_to_write);
8937 btrfs_add_delayed_iput(inode);
8938
8939 if (ret || *nr_to_write <= 0)
8940 goto out;
8941 }
8942 cond_resched();
8943 spin_lock(&root->delalloc_lock);
8944 }
8945 spin_unlock(&root->delalloc_lock);
8946
8947 out:
8948 list_for_each_entry_safe(work, next, &works, list) {
8949 list_del_init(&work->list);
8950 wait_for_completion(&work->completion);
8951 kfree(work);
8952 }
8953
8954 if (!list_empty(&splice)) {
8955 spin_lock(&root->delalloc_lock);
8956 list_splice_tail(&splice, &root->delalloc_inodes);
8957 spin_unlock(&root->delalloc_lock);
8958 }
8959 mutex_unlock(&root->delalloc_mutex);
8960 return ret;
8961 }
8962
8963 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
8964 {
8965 struct btrfs_fs_info *fs_info = root->fs_info;
8966
8967 if (BTRFS_FS_ERROR(fs_info))
8968 return -EROFS;
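	/* A NULL nr_to_write flushes all delalloc inodes via async work items. */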
8969 return start_delalloc_inodes(root, NULL, true, in_reclaim_context);
8970 }
8971
8972 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
8973 bool in_reclaim_context)
8974 {
8975 long *nr_to_write = nr == LONG_MAX ? NULL : &nr;
8976 struct btrfs_root *root;
8977 LIST_HEAD(splice);
8978 int ret;
8979
8980 if (BTRFS_FS_ERROR(fs_info))
8981 return -EROFS;
8982
8983 mutex_lock(&fs_info->delalloc_root_mutex);
8984 spin_lock(&fs_info->delalloc_root_lock);
8985 list_splice_init(&fs_info->delalloc_roots, &splice);
8986 while (!list_empty(&splice)) {
8987 root = list_first_entry(&splice, struct btrfs_root,
8988 delalloc_root);
8989 root = btrfs_grab_root(root);
8990 BUG_ON(!root);
8991 list_move_tail(&root->delalloc_root,
8992 &fs_info->delalloc_roots);
8993 spin_unlock(&fs_info->delalloc_root_lock);
8994
8995 ret = start_delalloc_inodes(root, nr_to_write, false,
8996 in_reclaim_context);
8997 btrfs_put_root(root);
8998 if (ret < 0 || nr <= 0)
8999 goto out;
9000 spin_lock(&fs_info->delalloc_root_lock);
9001 }
9002 spin_unlock(&fs_info->delalloc_root_lock);
9003
9004 ret = 0;
9005 out:
9006 if (!list_empty(&splice)) {
9007 spin_lock(&fs_info->delalloc_root_lock);
9008 list_splice_tail(&splice, &fs_info->delalloc_roots);
9009 spin_unlock(&fs_info->delalloc_root_lock);
9010 }
9011 mutex_unlock(&fs_info->delalloc_root_mutex);
9012 return ret;
9013 }
9014
9015 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
9016 struct dentry *dentry, const char *symname)
9017 {
9018 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9019 struct btrfs_trans_handle *trans;
9020 struct btrfs_root *root = BTRFS_I(dir)->root;
9021 struct btrfs_path *path;
9022 struct btrfs_key key;
9023 struct inode *inode;
9024 struct btrfs_new_inode_args new_inode_args = {
9025 .dir = dir,
9026 .dentry = dentry,
9027 };
9028 unsigned int trans_num_items;
9029 int ret;
9030 int name_len;
9031 int datasize;
9032 unsigned long ptr;
9033 struct btrfs_file_extent_item *ei;
9034 struct extent_buffer *leaf;
9035
9036 name_len = strlen(symname);
9037 /*
9038 * Symlinks use uncompressed inline extent data, which should not
9039 * reach the block size.
9040 */
9041 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
9042 name_len >= fs_info->sectorsize)
9043 return -ENAMETOOLONG;
9044
9045 inode = new_inode(dir->i_sb);
9046 if (!inode)
9047 return -ENOMEM;
9048 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
9049 inode->i_op = &btrfs_symlink_inode_operations;
9050 inode_nohighmem(inode);
9051 inode->i_mapping->a_ops = &btrfs_aops;
9052 btrfs_i_size_write(BTRFS_I(inode), name_len);
9053 inode_set_bytes(inode, name_len);
9054
9055 new_inode_args.inode = inode;
9056 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9057 if (ret)
9058 goto out_inode;
9059 /* 1 additional item for the inline extent */
9060 trans_num_items++;
9061
9062 trans = btrfs_start_transaction(root, trans_num_items);
9063 if (IS_ERR(trans)) {
9064 ret = PTR_ERR(trans);
9065 goto out_new_inode_args;
9066 }
9067
9068 ret = btrfs_create_new_inode(trans, &new_inode_args);
9069 if (ret)
9070 goto out;
9071
9072 path = btrfs_alloc_path();
9073 if (unlikely(!path)) {
9074 ret = -ENOMEM;
9075 btrfs_abort_transaction(trans, ret);
9076 discard_new_inode(inode);
9077 inode = NULL;
9078 goto out;
9079 }
9080 key.objectid = btrfs_ino(BTRFS_I(inode));
9081 key.type = BTRFS_EXTENT_DATA_KEY;
9082 key.offset = 0;
9083 datasize = btrfs_file_extent_calc_inline_size(name_len);
9084 ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
9085 if (unlikely(ret)) {
9086 btrfs_abort_transaction(trans, ret);
9087 btrfs_free_path(path);
9088 discard_new_inode(inode);
9089 inode = NULL;
9090 goto out;
9091 }
9092 leaf = path->nodes[0];
9093 ei = btrfs_item_ptr(leaf, path->slots[0],
9094 struct btrfs_file_extent_item);
9095 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9096 btrfs_set_file_extent_type(leaf, ei,
9097 BTRFS_FILE_EXTENT_INLINE);
9098 btrfs_set_file_extent_encryption(leaf, ei, 0);
9099 btrfs_set_file_extent_compression(leaf, ei, 0);
9100 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9101 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9102
9103 ptr = btrfs_file_extent_inline_start(ei);
9104 write_extent_buffer(leaf, symname, ptr, name_len);
9105 btrfs_free_path(path);
9106
9107 d_instantiate_new(dentry, inode);
9108 ret = 0;
9109 out:
9110 btrfs_end_transaction(trans);
9111 btrfs_btree_balance_dirty(fs_info);
9112 out_new_inode_args:
9113 btrfs_new_inode_args_destroy(&new_inode_args);
9114 out_inode:
9115 if (ret)
9116 iput(inode);
9117 return ret;
9118 }
9119
9120 static struct btrfs_trans_handle *insert_prealloc_file_extent(
9121 struct btrfs_trans_handle *trans_in,
9122 struct btrfs_inode *inode,
9123 struct btrfs_key *ins,
9124 u64 file_offset)
9125 {
9126 struct btrfs_file_extent_item stack_fi;
9127 struct btrfs_replace_extent_info extent_info;
9128 struct btrfs_trans_handle *trans = trans_in;
9129 struct btrfs_path *path;
9130 u64 start = ins->objectid;
9131 u64 len = ins->offset;
9132 u64 qgroup_released = 0;
9133 int ret;
9134
9135 memset(&stack_fi, 0, sizeof(stack_fi));
9136
9137 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
9138 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
9139 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
9140 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
9141 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
9142 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
9143 /* Encryption and other encoding are reserved and all 0. */
9144
9145 ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
9146 if (ret < 0)
9147 return ERR_PTR(ret);
9148
9149 if (trans) {
9150 ret = insert_reserved_file_extent(trans, inode,
9151 file_offset, &stack_fi,
9152 true, qgroup_released);
9153 if (ret)
9154 goto free_qgroup;
9155 return trans;
9156 }
9157
9158 extent_info.disk_offset = start;
9159 extent_info.disk_len = len;
9160 extent_info.data_offset = 0;
9161 extent_info.data_len = len;
9162 extent_info.file_offset = file_offset;
9163 extent_info.extent_buf = (char *)&stack_fi;
9164 extent_info.is_new_extent = true;
9165 extent_info.update_times = true;
9166 extent_info.qgroup_reserved = qgroup_released;
9167 extent_info.insertions = 0;
9168
9169 path = btrfs_alloc_path();
9170 if (!path) {
9171 ret = -ENOMEM;
9172 goto free_qgroup;
9173 }
9174
9175 ret = btrfs_replace_file_extents(inode, path, file_offset,
9176 file_offset + len - 1, &extent_info,
9177 &trans);
9178 btrfs_free_path(path);
9179 if (ret)
9180 goto free_qgroup;
9181 return trans;
9182
9183 free_qgroup:
9184 /*
9185 * We released the qgroup data range at the beginning of the function,
9186 * and normally the qgroup_released bytes will be freed when committing
9187 * the transaction.
9188 * But if we error out early, we have to free what we released or we
9189 * leak the qgroup data reservation.
9190 */
9191 btrfs_qgroup_free_refroot(inode->root->fs_info,
9192 btrfs_root_id(inode->root), qgroup_released,
9193 BTRFS_QGROUP_RSV_DATA);
9194 return ERR_PTR(ret);
9195 }
9196
9197 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9198 u64 start, u64 num_bytes, u64 min_size,
9199 loff_t actual_len, u64 *alloc_hint,
9200 struct btrfs_trans_handle *trans)
9201 {
9202 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
9203 struct extent_map *em;
9204 struct btrfs_root *root = BTRFS_I(inode)->root;
9205 struct btrfs_key ins;
9206 u64 cur_offset = start;
9207 u64 clear_offset = start;
9208 u64 i_size;
9209 u64 cur_bytes;
9210 u64 last_alloc = (u64)-1;
9211 int ret = 0;
9212 bool own_trans = true;
9213 u64 end = start + num_bytes - 1;
9214
9215 if (trans)
9216 own_trans = false;
9217 while (num_bytes > 0) {
9218 cur_bytes = min_t(u64, num_bytes, SZ_256M);
9219 cur_bytes = max(cur_bytes, min_size);
9220 /*
9221 * If we are severely fragmented we could end up with really
9222 * small allocations, so if the allocator is returning small
9223 * chunks, let's make its job easier by only searching for those
9224 * sized chunks.
9225 */
9226 cur_bytes = min(cur_bytes, last_alloc);
9227 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
9228 min_size, 0, *alloc_hint, &ins, true, false);
9229 if (ret)
9230 break;
9231
9232 /*
9233 * We've reserved this space, and thus converted it from
9234 * ->bytes_may_use to ->bytes_reserved. Any error that happens
9235 * from here on out we will only need to clear our reservation
9236 * for the remaining unreserved area, so advance our
9237 * clear_offset by our extent size.
9238 */
9239 clear_offset += ins.offset;
9240
9241 last_alloc = ins.offset;
9242 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
9243 &ins, cur_offset);
9244 /*
9245 * Now that we inserted the prealloc extent we can finally
9246 * decrement the number of reservations in the block group.
9247 * If we did it before, we could race with relocation and have
9248 * relocation miss the reserved extent, making it fail later.
9249 */
9250 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9251 if (IS_ERR(trans)) {
9252 ret = PTR_ERR(trans);
9253 btrfs_free_reserved_extent(fs_info, ins.objectid,
9254 ins.offset, false);
9255 break;
9256 }
9257
9258 em = btrfs_alloc_extent_map();
9259 if (!em) {
9260 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
9261 cur_offset + ins.offset - 1, false);
9262 btrfs_set_inode_full_sync(BTRFS_I(inode));
9263 goto next;
9264 }
9265
9266 em->start = cur_offset;
9267 em->len = ins.offset;
9268 em->disk_bytenr = ins.objectid;
9269 em->offset = 0;
9270 em->disk_num_bytes = ins.offset;
9271 em->ram_bytes = ins.offset;
9272 em->flags |= EXTENT_FLAG_PREALLOC;
9273 em->generation = trans->transid;
9274
9275 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
9276 btrfs_free_extent_map(em);
9277 next:
9278 num_bytes -= ins.offset;
9279 cur_offset += ins.offset;
9280 *alloc_hint = ins.objectid + ins.offset;
9281
9282 inode_inc_iversion(inode);
9283 inode_set_ctime_current(inode);
9284 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9285 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9286 (actual_len > inode->i_size) &&
9287 (cur_offset > inode->i_size)) {
9288 if (cur_offset > actual_len)
9289 i_size = actual_len;
9290 else
9291 i_size = cur_offset;
9292 i_size_write(inode, i_size);
9293 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
9294 }
9295
9296 ret = btrfs_update_inode(trans, BTRFS_I(inode));
9297
9298 if (unlikely(ret)) {
9299 btrfs_abort_transaction(trans, ret);
9300 if (own_trans)
9301 btrfs_end_transaction(trans);
9302 break;
9303 }
9304
9305 if (own_trans) {
9306 btrfs_end_transaction(trans);
9307 trans = NULL;
9308 }
9309 }
9310 if (clear_offset < end)
9311 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
9312 end - clear_offset + 1);
9313 return ret;
9314 }
9315
9316 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9317 u64 start, u64 num_bytes, u64 min_size,
9318 loff_t actual_len, u64 *alloc_hint)
9319 {
9320 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9321 min_size, actual_len, alloc_hint,
9322 NULL);
9323 }
9324
9325 int btrfs_prealloc_file_range_trans(struct inode *inode,
9326 struct btrfs_trans_handle *trans, int mode,
9327 u64 start, u64 num_bytes, u64 min_size,
9328 loff_t actual_len, u64 *alloc_hint)
9329 {
9330 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9331 min_size, actual_len, alloc_hint, trans);
9332 }
9333
9334 /*
9335 * NOTE: in case you are adding a MAY_EXEC check for directories:
9336 * we are marking them with IOP_FASTPERM_MAY_EXEC, allowing path lookup to
9337 * elide calls here.
9338 */
9339 static int btrfs_permission(struct mnt_idmap *idmap,
9340 struct inode *inode, int mask)
9341 {
9342 struct btrfs_root *root = BTRFS_I(inode)->root;
9343 umode_t mode = inode->i_mode;
9344
9345 if (mask & MAY_WRITE &&
9346 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9347 if (btrfs_root_readonly(root))
9348 return -EROFS;
9349 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9350 return -EACCES;
9351 }
9352 return generic_permission(idmap, inode, mask);
9353 }
9354
9355 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
9356 struct file *file, umode_t mode)
9357 {
9358 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
9359 struct btrfs_trans_handle *trans;
9360 struct btrfs_root *root = BTRFS_I(dir)->root;
9361 struct inode *inode;
9362 struct btrfs_new_inode_args new_inode_args = {
9363 .dir = dir,
9364 .dentry = file->f_path.dentry,
9365 .orphan = true,
9366 };
9367 unsigned int trans_num_items;
9368 int ret;
9369
9370 inode = new_inode(dir->i_sb);
9371 if (!inode)
9372 return -ENOMEM;
9373 inode_init_owner(idmap, inode, dir, mode);
9374 inode->i_fop = &btrfs_file_operations;
9375 inode->i_op = &btrfs_file_inode_operations;
9376 inode->i_mapping->a_ops = &btrfs_aops;
9377
9378 new_inode_args.inode = inode;
9379 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
9380 if (ret)
9381 goto out_inode;
9382
9383 trans = btrfs_start_transaction(root, trans_num_items);
9384 if (IS_ERR(trans)) {
9385 ret = PTR_ERR(trans);
9386 goto out_new_inode_args;
9387 }
9388
9389 ret = btrfs_create_new_inode(trans, &new_inode_args);
9390
9391 /*
9392 * We set number of links to 0 in btrfs_create_new_inode(), and here we
9393 * set it to 1 because d_tmpfile() will issue a warning if the count is
9394 * 0, through:
9395 *
9396 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9397 */
9398 set_nlink(inode, 1);
9399
9400 if (!ret) {
9401 d_tmpfile(file, inode);
9402 unlock_new_inode(inode);
9403 mark_inode_dirty(inode);
9404 }
9405
9406 btrfs_end_transaction(trans);
9407 btrfs_btree_balance_dirty(fs_info);
9408 out_new_inode_args:
9409 btrfs_new_inode_args_destroy(&new_inode_args);
9410 out_inode:
9411 if (ret)
9412 iput(inode);
9413 return finish_open_simple(file, ret);
9414 }
9415
9416 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
9417 int compress_type)
9418 {
9419 switch (compress_type) {
9420 case BTRFS_COMPRESS_NONE:
9421 return BTRFS_ENCODED_IO_COMPRESSION_NONE;
9422 case BTRFS_COMPRESS_ZLIB:
9423 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
9424 case BTRFS_COMPRESS_LZO:
9425 /*
9426 * The LZO format depends on the sector size. 4K is the minimum and
9427 * 64K the maximum sector size that we support.
9428 */
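	/*
	 * sectorsize_bits - 12 selects the matching variant: 4K (bits 12)
	 * maps to LZO_4K, 8K to LZO_8K, ..., 64K (bits 16) to LZO_64K,
	 * since the BTRFS_ENCODED_IO_COMPRESSION_LZO_* values are
	 * consecutive.
	 */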
9429 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
9430 return -EINVAL;
9431 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
9432 (fs_info->sectorsize_bits - 12);
9433 case BTRFS_COMPRESS_ZSTD:
9434 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
9435 default:
9436 return -EUCLEAN;
9437 }
9438 }
9439
9440 static ssize_t btrfs_encoded_read_inline(
9441 struct kiocb *iocb,
9442 struct iov_iter *iter, u64 start,
9443 u64 lockend,
9444 struct extent_state **cached_state,
9445 u64 extent_start, size_t count,
9446 struct btrfs_ioctl_encoded_io_args *encoded,
9447 bool *unlocked)
9448 {
9449 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9450 struct btrfs_root *root = inode->root;
9451 struct btrfs_fs_info *fs_info = root->fs_info;
9452 struct extent_io_tree *io_tree = &inode->io_tree;
9453 BTRFS_PATH_AUTO_FREE(path);
9454 struct extent_buffer *leaf;
9455 struct btrfs_file_extent_item *item;
9456 u64 ram_bytes;
9457 unsigned long ptr;
9458 void *tmp;
9459 ssize_t ret;
9460 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
9461
9462 path = btrfs_alloc_path();
9463 if (!path)
9464 return -ENOMEM;
9465
9466 path->nowait = nowait;
9467
9468 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9469 extent_start, 0);
9470 if (ret) {
9471 if (unlikely(ret > 0)) {
9472 /* The extent item disappeared? */
9473 return -EIO;
9474 }
9475 return ret;
9476 }
9477 leaf = path->nodes[0];
9478 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9479
9480 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9481 ptr = btrfs_file_extent_inline_start(item);
9482
9483 encoded->len = min_t(u64, extent_start + ram_bytes,
9484 inode->vfs_inode.i_size) - iocb->ki_pos;
9485 ret = btrfs_encoded_io_compression_from_extent(fs_info,
9486 btrfs_file_extent_compression(leaf, item));
9487 if (ret < 0)
9488 return ret;
9489 encoded->compression = ret;
9490 if (encoded->compression) {
9491 size_t inline_size;
9492
9493 inline_size = btrfs_file_extent_inline_item_len(leaf,
9494 path->slots[0]);
9495 if (inline_size > count)
9496 return -ENOBUFS;
9497
9498 count = inline_size;
9499 encoded->unencoded_len = ram_bytes;
9500 encoded->unencoded_offset = iocb->ki_pos - extent_start;
9501 } else {
9502 count = min_t(u64, count, encoded->len);
9503 encoded->len = count;
9504 encoded->unencoded_len = count;
9505 ptr += iocb->ki_pos - extent_start;
9506 }
9507
9508 tmp = kmalloc(count, GFP_NOFS);
9509 if (!tmp)
9510 return -ENOMEM;
9511
9512 read_extent_buffer(leaf, tmp, ptr, count);
9513 btrfs_release_path(path);
9514 btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9515 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9516 *unlocked = true;
9517
9518 ret = copy_to_iter(tmp, count, iter);
9519 if (ret != count)
9520 ret = -EFAULT;
9521 kfree(tmp);
9522
9523 return ret;
9524 }
9525
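/*
 * State shared by all bios of one encoded read: pending_refs is held once
 * by the submitter and once per in-flight bio, and status records the
 * first error reported by any bio.
 */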
9526 struct btrfs_encoded_read_private {
9527 struct completion *sync_reads;
9528 void *uring_ctx;
9529 refcount_t pending_refs;
9530 blk_status_t status;
9531 };
9532
9533 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
9534 {
9535 struct btrfs_encoded_read_private *priv = bbio->private;
9536
9537 if (bbio->bio.bi_status) {
9538 /*
9539 * The memory barrier implied by the refcount_dec_and_test() here
9540 * pairs with the memory barrier implied by the refcount_dec_and_test()
9541 * in btrfs_encoded_read_regular_fill_pages() to ensure that
9542 * this write is observed before the load of status in
9543 * btrfs_encoded_read_regular_fill_pages().
9544 */
9545 WRITE_ONCE(priv->status, bbio->bio.bi_status);
9546 }
9547 if (refcount_dec_and_test(&priv->pending_refs)) {
9548 int err = blk_status_to_errno(READ_ONCE(priv->status));
9549
9550 if (priv->uring_ctx) {
9551 btrfs_uring_read_extent_endio(priv->uring_ctx, err);
9552 kfree(priv);
9553 } else {
9554 complete(priv->sync_reads);
9555 }
9556 }
9557 bio_put(&bbio->bio);
9558 }
9559
9560 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
9561 u64 disk_bytenr, u64 disk_io_size,
9562 struct page **pages, void *uring_ctx)
9563 {
9564 struct btrfs_encoded_read_private *priv, sync_priv;
9565 struct completion sync_reads;
9566 unsigned long i = 0;
9567 struct btrfs_bio *bbio;
9568 int ret;
9569
9570 /*
9571 * Fast path for synchronous reads, which complete within this call;
9572 * io_uring reads complete later, so their private data is heap-allocated.
9573 */
9574 if (uring_ctx) {
9575 priv = kmalloc_obj(struct btrfs_encoded_read_private, GFP_NOFS);
9576 if (!priv)
9577 return -ENOMEM;
9578 } else {
9579 priv = &sync_priv;
9580 init_completion(&sync_reads);
9581 priv->sync_reads = &sync_reads;
9582 }
9583
9584 refcount_set(&priv->pending_refs, 1);
9585 priv->status = 0;
9586 priv->uring_ctx = uring_ctx;
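	/*
	 * pending_refs starts at 1 for the submitter itself, so the
	 * completion cannot fire until we drop that reference after the
	 * last bio is submitted below.
	 */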
9587
9588 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
9589 btrfs_encoded_read_endio, priv);
9590 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9591
9592 do {
9593 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
9594
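		/*
		 * If the bio is full, submit it and retry this page with a
		 * fresh bio (i is not advanced on this path).
		 */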
9595 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
9596 refcount_inc(&priv->pending_refs);
9597 btrfs_submit_bbio(bbio, 0);
9598
9599 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
9600 btrfs_encoded_read_endio, priv);
9601 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9602 continue;
9603 }
9604
9605 i++;
9606 disk_bytenr += bytes;
9607 disk_io_size -= bytes;
9608 } while (disk_io_size);
9609
9610 refcount_inc(&priv->pending_refs);
9611 btrfs_submit_bbio(bbio, 0);
9612
9613 if (uring_ctx) {
9614 if (refcount_dec_and_test(&priv->pending_refs)) {
9615 ret = blk_status_to_errno(READ_ONCE(priv->status));
9616 btrfs_uring_read_extent_endio(uring_ctx, ret);
9617 kfree(priv);
9618 return ret;
9619 }
9620
9621 return -EIOCBQUEUED;
9622 } else {
9623 if (!refcount_dec_and_test(&priv->pending_refs))
9624 wait_for_completion_io(&sync_reads);
9625 /* See btrfs_encoded_read_endio() for ordering. */
9626 return blk_status_to_errno(READ_ONCE(priv->status));
9627 }
9628 }
9629
9630 ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
9631 u64 start, u64 lockend,
9632 struct extent_state **cached_state,
9633 u64 disk_bytenr, u64 disk_io_size,
9634 size_t count, bool compressed, bool *unlocked)
9635 {
9636 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9637 struct extent_io_tree *io_tree = &inode->io_tree;
9638 struct page **pages;
9639 unsigned long nr_pages, i;
9640 u64 cur;
9641 size_t page_offset;
9642 ssize_t ret;
9643
9644 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
9645 pages = kzalloc_objs(struct page *, nr_pages, GFP_NOFS);
9646 if (!pages)
9647 return -ENOMEM;
9648 ret = btrfs_alloc_page_array(nr_pages, pages, false);
9649 if (ret) {
9650 ret = -ENOMEM;
9651 goto out;
9652 }
9653
9654 ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
9655 disk_io_size, pages, NULL);
9656 if (ret)
9657 goto out;
9658
9659 btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9660 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9661 *unlocked = true;
9662
9663 if (compressed) {
9664 i = 0;
9665 page_offset = 0;
9666 } else {
9667 i = (iocb->ki_pos - start) >> PAGE_SHIFT;
9668 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
9669 }
9670 cur = 0;
9671 while (cur < count) {
9672 size_t bytes = min_t(size_t, count - cur,
9673 PAGE_SIZE - page_offset);
9674
9675 if (copy_page_to_iter(pages[i], page_offset, bytes,
9676 iter) != bytes) {
9677 ret = -EFAULT;
9678 goto out;
9679 }
9680 i++;
9681 cur += bytes;
9682 page_offset = 0;
9683 }
9684 ret = count;
9685 out:
9686 for (i = 0; i < nr_pages; i++) {
9687 if (pages[i])
9688 __free_page(pages[i]);
9689 }
9690 kfree(pages);
9691 return ret;
9692 }
9693
9694 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
9695 struct btrfs_ioctl_encoded_io_args *encoded,
9696 struct extent_state **cached_state,
9697 u64 *disk_bytenr, u64 *disk_io_size)
9698 {
9699 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9700 struct btrfs_fs_info *fs_info = inode->root->fs_info;
9701 struct extent_io_tree *io_tree = &inode->io_tree;
9702 ssize_t ret;
9703 size_t count = iov_iter_count(iter);
9704 u64 start, lockend;
9705 struct extent_map *em;
9706 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
9707 bool unlocked = false;
9708
9709 file_accessed(iocb->ki_filp);
9710
9711 ret = btrfs_inode_lock(inode,
9712 BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
9713 if (ret)
9714 return ret;
9715
9716 if (iocb->ki_pos >= inode->vfs_inode.i_size) {
9717 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9718 return 0;
9719 }
9720 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
9721 /*
9722 * We don't know how long the extent containing iocb->ki_pos is, but if
9723 * it's compressed we know that it won't be longer than this.
9724 */
9725 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
9726
9727 if (nowait) {
9728 struct btrfs_ordered_extent *ordered;
9729
9730 if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
9731 start, lockend)) {
9732 ret = -EAGAIN;
9733 goto out_unlock_inode;
9734 }
9735
9736 if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
9737 ret = -EAGAIN;
9738 goto out_unlock_inode;
9739 }
9740
9741 ordered = btrfs_lookup_ordered_range(inode, start,
9742 lockend - start + 1);
9743 if (ordered) {
9744 btrfs_put_ordered_extent(ordered);
9745 btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9746 ret = -EAGAIN;
9747 goto out_unlock_inode;
9748 }
9749 } else {
9750 for (;;) {
9751 struct btrfs_ordered_extent *ordered;
9752
9753 ret = btrfs_wait_ordered_range(inode, start,
9754 lockend - start + 1);
9755 if (ret)
9756 goto out_unlock_inode;
9757
9758 btrfs_lock_extent(io_tree, start, lockend, cached_state);
9759 ordered = btrfs_lookup_ordered_range(inode, start,
9760 lockend - start + 1);
9761 if (!ordered)
9762 break;
9763 btrfs_put_ordered_extent(ordered);
9764 btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9765 cond_resched();
9766 }
9767 }
9768
9769 em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
9770 if (IS_ERR(em)) {
9771 ret = PTR_ERR(em);
9772 goto out_unlock_extent;
9773 }
9774
9775 if (em->disk_bytenr == EXTENT_MAP_INLINE) {
9776 u64 extent_start = em->start;
9777
9778 /*
9779 * For inline extents we get everything we need out of the
9780 * extent item.
9781 */
9782 btrfs_free_extent_map(em);
9783 em = NULL;
9784 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
9785 cached_state, extent_start,
9786 count, encoded, &unlocked);
9787 goto out_unlock_extent;
9788 }
9789
9790 /*
9791 * We only want to return up to EOF even if the extent extends beyond
9792 * that.
9793 */
9794 encoded->len = min_t(u64, btrfs_extent_map_end(em),
9795 inode->vfs_inode.i_size) - iocb->ki_pos;
9796 if (em->disk_bytenr == EXTENT_MAP_HOLE ||
9797 (em->flags & EXTENT_FLAG_PREALLOC)) {
9798 *disk_bytenr = EXTENT_MAP_HOLE;
9799 count = min_t(u64, count, encoded->len);
9800 encoded->len = count;
9801 encoded->unencoded_len = count;
9802 } else if (btrfs_extent_map_is_compressed(em)) {
9803 *disk_bytenr = em->disk_bytenr;
9804 /*
9805 * Bail if the buffer isn't large enough to return the whole
9806 * compressed extent.
9807 */
9808 if (em->disk_num_bytes > count) {
9809 ret = -ENOBUFS;
9810 goto out_em;
9811 }
9812 *disk_io_size = em->disk_num_bytes;
9813 count = em->disk_num_bytes;
9814 encoded->unencoded_len = em->ram_bytes;
9815 encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
9816 ret = btrfs_encoded_io_compression_from_extent(fs_info,
9817 btrfs_extent_map_compression(em));
9818 if (ret < 0)
9819 goto out_em;
9820 encoded->compression = ret;
9821 } else {
9822 *disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start);
9823 if (encoded->len > count)
9824 encoded->len = count;
9825 /*
9826 * Don't read beyond what we locked. This also limits the page
9827 * allocations that we'll do.
9828 */
9829 *disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
9830 count = start + *disk_io_size - iocb->ki_pos;
9831 encoded->len = count;
9832 encoded->unencoded_len = count;
9833 *disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
9834 }
9835 btrfs_free_extent_map(em);
9836 em = NULL;
9837
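	/*
	 * Holes and prealloc extents read back as zeroes here; anything else
	 * returns -EIOCBQUEUED so the caller can do the regular encoded read.
	 */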
9838 if (*disk_bytenr == EXTENT_MAP_HOLE) {
9839 btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9840 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9841 unlocked = true;
9842 ret = iov_iter_zero(count, iter);
9843 if (ret != count)
9844 ret = -EFAULT;
9845 } else {
9846 ret = -EIOCBQUEUED;
9847 goto out_unlock_extent;
9848 }
9849
9850 out_em:
9851 btrfs_free_extent_map(em);
9852 out_unlock_extent:
9853 /* Leave inode and extent locked if we need to do a read. */
9854 if (!unlocked && ret != -EIOCBQUEUED)
9855 btrfs_unlock_extent(io_tree, start, lockend, cached_state);
9856 out_unlock_inode:
9857 if (!unlocked && ret != -EIOCBQUEUED)
9858 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9859 return ret;
9860 }
9861
9862 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
9863 const struct btrfs_ioctl_encoded_io_args *encoded)
9864 {
9865 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9866 struct btrfs_root *root = inode->root;
9867 struct btrfs_fs_info *fs_info = root->fs_info;
9868 struct extent_io_tree *io_tree = &inode->io_tree;
9869 struct extent_changeset *data_reserved = NULL;
9870 struct extent_state *cached_state = NULL;
9871 struct btrfs_ordered_extent *ordered;
9872 struct btrfs_file_extent file_extent;
9873 struct compressed_bio *cb = NULL;
9874 int compression;
9875 size_t orig_count;
9876 const u32 min_folio_size = btrfs_min_folio_size(fs_info);
9877 u64 start, end;
9878 u64 num_bytes, ram_bytes, disk_num_bytes;
9879 struct btrfs_key ins;
9880 bool extent_reserved = false;
9881 struct extent_map *em;
9882 ssize_t ret;
9883
9884 switch (encoded->compression) {
9885 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
9886 compression = BTRFS_COMPRESS_ZLIB;
9887 break;
9888 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
9889 compression = BTRFS_COMPRESS_ZSTD;
9890 break;
9891 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
9892 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
9893 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
9894 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
9895 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
9896 /* The sector size must match for LZO. */
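		/* E.g. LZO_4K requires sectorsize_bits == 12, LZO_64K requires 16. */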
9897 if (encoded->compression -
9898 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
9899 fs_info->sectorsize_bits)
9900 return -EINVAL;
9901 compression = BTRFS_COMPRESS_LZO;
9902 break;
9903 default:
9904 return -EINVAL;
9905 }
9906 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
9907 return -EINVAL;
9908
9909 /*
9910 * Compressed extents should always have checksums, so error out if we
9911 * have a NOCOW file or the inode was created while mounted with NODATASUM.
9912 */
9913 if (inode->flags & BTRFS_INODE_NODATASUM)
9914 return -EINVAL;
9915
9916 orig_count = iov_iter_count(from);
9917
9918 /* The extent size must be sane. */
9919 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
9920 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
9921 return -EINVAL;
9922
9923 /*
9924 * The compressed data must be smaller than the decompressed data.
9925 *
9926 * It's of course possible for data to compress to larger or the same
9927 * size, but the buffered I/O path falls back to no compression for such
9928 * data, and we don't want to break any assumptions by creating these
9929 * extents.
9930 *
9931 * Note that this is less strict than the current check we have that the
9932 * compressed data must be at least one sector smaller than the
9933 * decompressed data. We only want to enforce the weaker requirement
9934 * from old kernels that it is at least one byte smaller.
9935 */
9936 if (orig_count >= encoded->unencoded_len)
9937 return -EINVAL;
9938
9939 /* The extent must start on a sector boundary. */
9940 start = iocb->ki_pos;
9941 if (!IS_ALIGNED(start, fs_info->sectorsize))
9942 return -EINVAL;
9943
9944 /*
9945 * The extent must end on a sector boundary. However, we allow a write
9946 * which ends at or extends i_size to have an unaligned length; we round
9947 * up the extent size and set i_size to the unaligned end.
9948 */
9949 if (start + encoded->len < inode->vfs_inode.i_size &&
9950 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
9951 return -EINVAL;
9952
9953 /* Finally, the offset in the unencoded data must be sector-aligned. */
9954 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
9955 return -EINVAL;
9956
9957 num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
9958 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
9959 end = start + num_bytes - 1;
9960
9961 /*
9962 * If the extent cannot be inline, the compressed data on disk must be
9963 * sector-aligned. For convenience, we extend it with zeroes if it
9964 * isn't.
9965 */
9966 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
9967
9968 cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
9969 for (int i = 0; i * min_folio_size < disk_num_bytes; i++) {
9970 struct folio *folio;
9971 size_t bytes = min(min_folio_size, iov_iter_count(from));
9972 char *kaddr;
9973
9974 folio = btrfs_alloc_compr_folio(fs_info);
9975 if (!folio) {
9976 ret = -ENOMEM;
9977 goto out_cb;
9978 }
9979 kaddr = kmap_local_folio(folio, 0);
9980 ret = copy_from_iter(kaddr, bytes, from);
9981 kunmap_local(kaddr);
9982 if (ret != bytes) {
9983 folio_put(folio);
9984 ret = -EFAULT;
9985 goto out_cb;
9986 }
9987 if (bytes < min_folio_size)
9988 folio_zero_range(folio, bytes, min_folio_size - bytes);
9989 ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
9990 if (unlikely(!ret)) {
9991 folio_put(folio);
9992 ret = -EINVAL;
9993 goto out_cb;
9994 }
9995 }
9996 ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);
9997
9998 for (;;) {
9999 ret = btrfs_wait_ordered_range(inode, start, num_bytes);
10000 if (ret)
10001 goto out_cb;
10002 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
10003 start >> PAGE_SHIFT,
10004 end >> PAGE_SHIFT);
10005 if (ret)
10006 goto out_cb;
10007 btrfs_lock_extent(io_tree, start, end, &cached_state);
10008 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
10009 if (!ordered &&
10010 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10011 break;
10012 if (ordered)
10013 btrfs_put_ordered_extent(ordered);
10014 btrfs_unlock_extent(io_tree, start, end, &cached_state);
10015 cond_resched();
10016 }
10017
10018 /*
10019 * We don't use the higher-level delalloc space functions because our
10020 * num_bytes and disk_num_bytes are different.
10021 */
10022 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
10023 if (ret)
10024 goto out_unlock;
10025 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
10026 if (ret)
10027 goto out_free_data_space;
10028 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
10029 false);
10030 if (ret)
10031 goto out_qgroup_free_data;
10032
10033 /* Try an inline extent first. */
10034 if (encoded->unencoded_len == encoded->len &&
10035 encoded->unencoded_offset == 0 &&
10036 can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
10037 ret = __cow_file_range_inline(inode, encoded->len,
10038 orig_count, compression,
10039 bio_first_folio_all(&cb->bbio.bio),
10040 true);
10041 if (ret <= 0) {
10042 if (ret == 0)
10043 ret = orig_count;
10044 goto out_delalloc_release;
10045 }
10046 }
10047
	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
				   disk_num_bytes, 0, 0, &ins, true, true);
	if (ret)
		goto out_delalloc_release;
	extent_reserved = true;

	file_extent.disk_bytenr = ins.objectid;
	file_extent.disk_num_bytes = ins.offset;
	file_extent.num_bytes = num_bytes;
	file_extent.ram_bytes = ram_bytes;
	file_extent.offset = encoded->unencoded_offset;
	file_extent.compression = compression;
	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserved;
	}
	btrfs_free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
					     (1U << BTRFS_ORDERED_ENCODED) |
					     (1U << BTRFS_ORDERED_COMPRESSED));
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserved;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	if (start + encoded->len > inode->vfs_inode.i_size)
		i_size_write(&inode->vfs_inode, start + encoded->len);

	btrfs_unlock_extent(io_tree, start, end, &cached_state);

	btrfs_delalloc_release_extents(inode, num_bytes);

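	/*
	 * Everything is reserved and the ordered extent is set up; submit the
	 * compressed bio. Its completion path finishes the ordered extent and
	 * releases the folios.
	 */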
	btrfs_submit_compressed_write(ordered, cb);
	ret = orig_count;
	goto out;

out_free_reserved:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
out_delalloc_release:
	btrfs_delalloc_release_extents(inode, num_bytes);
	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
	if (ret < 0)
		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
out_free_data_space:
	/*
	 * If btrfs_reserve_extent() succeeded, then we already decremented
	 * bytes_may_use.
	 */
	if (!extent_reserved)
		btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
out_unlock:
	btrfs_unlock_extent(io_tree, start, end, &cached_state);
out_cb:
	if (cb)
		cleanup_compressed_bio(cb);
out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
	return ret;
}

#ifdef CONFIG_SWAP
/*
 * Add an entry indicating a block group or device which is pinned by a
 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
 * negative errno on failure.
 */
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
				  bool is_block_group)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp, *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	sp = kmalloc_obj(*sp, GFP_NOFS);
	if (!sp)
		return -ENOMEM;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->is_block_group = is_block_group;
	sp->bg_extent_count = 1;

	spin_lock(&fs_info->swapfile_pins_lock);
	p = &fs_info->swapfile_pins.rb_node;
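	/*
	 * Entries are ordered by (ptr, inode). If an identical pin already
	 * exists, only bump the block group's extent count.
	 */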
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
			p = &(*p)->rb_left;
		} else if (sp->ptr > entry->ptr ||
			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
			p = &(*p)->rb_right;
		} else {
			if (is_block_group)
				entry->bg_extent_count++;
			spin_unlock(&fs_info->swapfile_pins_lock);
			kfree(sp);
			return 1;
		}
	}
	rb_link_node(&sp->node, parent, p);
	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
	spin_unlock(&fs_info->swapfile_pins_lock);
	return 0;
}

/* Free all of the entries pinned by this swapfile. */
static void btrfs_free_swapfile_pins(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node, *next;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = rb_first(&fs_info->swapfile_pins);
	while (node) {
		next = rb_next(node);
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (sp->inode == inode) {
			rb_erase(&sp->node, &fs_info->swapfile_pins);
			if (sp->is_block_group) {
				btrfs_dec_block_group_swap_extents(sp->ptr,
							   sp->bg_extent_count);
				btrfs_put_block_group(sp->ptr);
			}
			kfree(sp);
		}
		node = next;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
}

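/*
 * Tracks the run of physically contiguous file extents currently being
 * accumulated, plus running totals for the swap map being built: @start is
 * the file offset where the current run begins, @block_start and @block_len
 * describe its physical location and size, @lowest_ppage and @highest_ppage
 * bound the physical pages seen so far, and @nr_pages and @nr_extents count
 * what has been handed to add_swap_extent().
 */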
struct btrfs_swap_info {
	u64 start;
	u64 block_start;
	u64 block_len;
	u64 lowest_ppage;
	u64 highest_ppage;
	unsigned long nr_pages;
	int nr_extents;
};

static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	unsigned long max_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	/*
	 * Our swapfile may have had its size extended after the swap header was
	 * written. In that case activating the swapfile should not go beyond
	 * the max size set in the swap header.
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

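	/*
	 * Page 0 of the swapfile holds the swap header and is never used for
	 * swap I/O, so leave it out of the reported page range.
	 */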
	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}

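/* Undo btrfs_swap_activate(): drop our pins and the swapfile count. */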
static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct btrfs_chunk_map *map = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
	struct btrfs_path *path = NULL;
	int ret = 0;
	u64 isize;
	u64 prev_extent_end = 0;

	/*
	 * Acquire the inode's mmap lock to prevent races with memory mapped
	 * writes, as they could happen after we flush delalloc below and before
	 * we lock the extent range further below. The inode was already locked
	 * higher up in the call chain.
	 */
	btrfs_assert_inode_locked(BTRFS_I(inode));
	down_write(&BTRFS_I(inode)->i_mmap_lock);

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
	if (ret)
		goto out_unlock_mmap;

	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		ret = -EINVAL;
		goto out_unlock_mmap;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		ret = -EINVAL;
		goto out_unlock_mmap;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		ret = -EINVAL;
		goto out_unlock_mmap;
	}

	path = btrfs_alloc_path();
	backref_ctx = btrfs_alloc_backref_share_check_ctx();
	if (!path || !backref_ctx) {
		ret = -ENOMEM;
		goto out_unlock_mmap;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
		"cannot activate swapfile while exclusive operation is running");
		ret = -EBUSY;
		goto out_unlock_mmap;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * We do not want to race with snapshot creation. If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	"cannot activate swapfile because snapshot creation is in progress");
		ret = -EINVAL;
		goto out_unlock_mmap;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that the subvolume is marked for deletion but still
	 * not removed yet. To prevent this race, we check the root status
	 * before activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	"cannot activate swapfile because subvolume %llu is being deleted",
			   btrfs_root_id(root));
		ret = -EPERM;
		goto out_unlock_mmap;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
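	/*
	 * Walk the file extent items from offset 0 up to isize: reject holes
	 * and inline, compressed, shared or multi-device/multi-profile
	 * extents, pin the device and each block group, and merge physically
	 * contiguous extents into runs that become swap extents.
	 */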
	while (prev_extent_end < isize) {
		struct btrfs_key key;
		struct extent_buffer *leaf;
		struct btrfs_file_extent_item *ei;
		struct btrfs_block_group *bg;
		u64 logical_block_start;
		u64 physical_block_start;
		u64 extent_gen;
		u64 disk_bytenr;
		u64 len;

		key.objectid = btrfs_ino(BTRFS_I(inode));
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = prev_extent_end;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * If key not found it means we have an implicit hole (NO_HOLES
		 * is enabled).
		 */
		if (ret > 0) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}

		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}

		if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
		if (disk_bytenr == 0) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
		extent_gen = btrfs_file_extent_generation(leaf, ei);
		prev_extent_end = btrfs_file_extent_end(path);

		if (prev_extent_end > isize)
			len = isize - key.offset;
		else
			len = btrfs_file_extent_num_bytes(leaf, ei);

		backref_ctx->curr_leaf_bytenr = leaf->start;

		/*
		 * Don't need the path anymore, release to avoid deadlocks when
		 * calling btrfs_is_data_extent_shared() because when joining a
		 * transaction it can block waiting for the current one's commit
		 * which in turn may be trying to lock the same leaf to flush
		 * delayed items for example.
		 */
		btrfs_release_path(path);

		ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
						  extent_gen, backref_ctx);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(map)) {
			ret = PTR_ERR(map);
			goto out;
		}

		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = map->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != map->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (map->stripes[0].physical +
					(logical_block_start - map->start));
		btrfs_free_chunk_map(map);
		map = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
				   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
				   bg->start,
				   atomic_read(&fs_info->scrubs_running) ?
					       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

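		/*
		 * Extend the current physically contiguous run, or flush it
		 * as a swap extent and start a new run at this extent.
		 */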
		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = key.offset;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(map))
		btrfs_free_chunk_map(map);

	btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

out_unlock_mmap:
	up_write(&BTRFS_I(inode)->i_mmap_lock);
	btrfs_free_backref_share_ctx(backref_ctx);
	btrfs_free_path(path);
	if (ret)
		return ret;

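	/*
	 * Report the physical span of the swapfile and account for all of its
	 * pages; sis->pages excludes the header page.
	 */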
	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif

/*
 * Update the number of bytes used in the VFS' inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
 * always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}

/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode:   The target inode.
 * @start:   Start offset of the file range, should be sector size aligned.
 * @end:     End offset (inclusive) of the file range, its value +1 should be
 *           sector size aligned.
 *
 * This should typically be used for cases where we locked an inode's VFS lock in
 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
 * we have flushed all delalloc in the range, we have waited for all ordered
 * extents in the range to complete and finally we have locked the file range in
 * the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), btrfs_root_id(root),
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}

/*
 * Find the first inode with a minimum number.
 *
 * @root:    The root to search in.
 * @min_ino: The minimum inode number.
 *
 * Find the first inode in the @root with a number >= @min_ino and return it.
 * Returns NULL if no such inode is found.
 */
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
{
	struct btrfs_inode *inode;
	unsigned long from = min_ino;

	xa_lock(&root->inodes);
	while (true) {
		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
		if (!inode)
			break;
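		/*
		 * If we can't grab a reference, the inode is being evicted;
		 * move on to the next one.
		 */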
		if (igrab(&inode->vfs_inode))
			break;

		from = btrfs_ino(inode) + 1;
		cond_resched_lock(&root->inodes.xa_lock);
	}
	xa_unlock(&root->inodes);

	return inode;
}

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr = btrfs_getattr,
	.lookup = btrfs_lookup,
	.create = btrfs_create,
	.unlink = btrfs_unlink,
	.link = btrfs_link,
	.mkdir = btrfs_mkdir,
	.rmdir = btrfs_rmdir,
	.rename = btrfs_rename2,
	.symlink = btrfs_symlink,
	.setattr = btrfs_setattr,
	.mknod = btrfs_mknod,
	.listxattr = btrfs_listxattr,
	.permission = btrfs_permission,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
	.tmpfile = btrfs_tmpfile,
	.fileattr_get = btrfs_fileattr_get,
	.fileattr_set = btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek = btrfs_dir_llseek,
	.read = generic_read_dir,
	.iterate_shared = btrfs_real_readdir,
	.open = btrfs_opendir,
	.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = btrfs_compat_ioctl,
#endif
	.release = btrfs_release_file,
	.fsync = btrfs_sync_file,
	.setlease = generic_setlease,
};

/*
 * Btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't
 * suitable for IO and will also change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio = btrfs_read_folio,
	.writepages = btrfs_writepages,
	.readahead = btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.launder_folio = btrfs_launder_folio,
	.release_folio = btrfs_release_folio,
	.migrate_folio = btrfs_migrate_folio,
	.dirty_folio = filemap_dirty_folio,
	.error_remove_folio = generic_error_remove_folio,
	.swap_activate = btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.listxattr = btrfs_listxattr,
	.permission = btrfs_permission,
	.fiemap = btrfs_fiemap,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
	.fileattr_get = btrfs_fileattr_get,
	.fileattr_set = btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.listxattr = btrfs_listxattr,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link = page_get_link,
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.listxattr = btrfs_listxattr,
	.update_time = btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete = btrfs_dentry_delete,
};