// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/filelock.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <linux/unaligned.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"
#include "fiemap.h"
#include "delayed-inode.h"

#define COW_FILE_RANGE_KEEP_LOCKED	(1UL << 0)
#define COW_FILE_RANGE_NO_INLINE	(1UL << 1)

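/* Lookup key for a btrfs inode: the inode number and the root it belongs to. */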
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass the needed info for
 * filename resolution and output of the error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

/*
 * For the file_extent_tree, we want to hold the inode lock when we look up and
 * update the disk_i_size, but lockdep will complain because, for our io_tree,
 * we hold the tree lock while acquiring the inode lock when setting delalloc.
 * These two things are unrelated, so make a class for the file_extent_tree so
 * we don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct folio *locked_folio, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);

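/*
 * Callback for iterate_extent_inodes(): resolve the file path(s) of an inode
 * in @root that references the corrupted extent, and print a checksum error
 * warning for each of them.
 */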
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath __free(inode_fs_paths) = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * without the filename.
		 */
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0) {
		btrfs_put_root(local_root);
		goto err;
	}

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			   warn->logical, warn->mirror_num, root, inum, offset,
			   fs_info->sectorsize, nlink,
			   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	return 0;

err:
	btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		   warn->logical, warn->mirror_num, root, inum, offset, ret);

	return ret;
}

/*
 * Do extra user-friendly error output (e.g. look up all the affected files).
 *
 * If the backref lookup fails, fall back to the plain error message without
 * filename resolution.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	BTRFS_PATH_AUTO_RELEASE(path);
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
		      btrfs_root_id(inode->root),
		      btrfs_ino(inode), file_off, logical,
		      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
		      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
		      mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				      logical, mirror_num,
				      (ref_level ? "node" : "leaf"),
				      ref_level, ref_root);
		}
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		/*
		 * Do not hold the path as the later iterate_extent_inodes()
		 * call can be time consuming.
		 */
		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}

static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (btrfs_is_data_reloc_root(root))
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(root), btrfs_ino(inode),
			      logical_start,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " BTRFS_CSUM_FMT " expected csum " BTRFS_CSUM_FMT " mirror %d",
			      btrfs_root_id(root), btrfs_ino(inode),
			      logical_start,
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum),
			      BTRFS_CSUM_FMT_VALUE(csum_size, csum_expected),
			      mirror_num);
	}
}

/*
 * Lock inode i_rwsem based on arguments passed.
 *
 * ilock_flags can have the following bits set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, and if it fails on the first
 *		     attempt return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}

/*
 * Clean up all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: the caller must ensure that when an error happens, it does not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 u64 offset, u64 bytes)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	const pgoff_t end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	struct folio *folio;

	while (index <= end_index) {
		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
		if (IS_ERR(folio)) {
			index++;
			continue;
		}

		index = folio_next_index(folio);
		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
						offset, bytes);
		folio_put(folio);
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}

static int btrfs_dirty_inode(struct btrfs_inode *inode);

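/*
 * Apply the default and access ACLs (if any) and initialize the security
 * xattr for a newly created inode.
 */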
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int ret;

	if (args->default_acl) {
		ret = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (ret)
			return ret;
	}
	if (args->acl) {
		ret = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (ret)
			return ret;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work of inserting an inline extent into the btree.
 * The caller should have called btrfs_drop_extents() so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct folio *compressed_folio,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	const u32 sectorsize = trans->fs_info->sectorsize;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	/*
	 * The decompressed size must still be no larger than a sector. Under
	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
	 * big deal and we can continue the insertion.
	 */
	ASSERT(size <= sectorsize);

	/*
	 * The compressed size also needs to be no larger than a page.
	 * That's also why we only need one folio as the parameter.
	 */
	if (compressed_folio) {
		ASSERT(compressed_size <= sectorsize);
		ASSERT(compressed_size <= PAGE_SIZE);
	} else {
		ASSERT(compressed_size == 0);
	}

	if (compressed_size && compressed_folio)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = 0;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		kaddr = kmap_local_folio(compressed_folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
		kunmap_local(kaddr);

		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		struct folio *folio;

		folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
		ASSERT(!IS_ERR(folio));
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_folio(folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		folio_put(folio);
	}
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		return ret;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

	return 0;
}

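/*
 * Check whether a delalloc range qualifies for an inline extent, based on its
 * offset and size and on the fs limits (sectorsize, maximum inline data size
 * and the max_inline mount option).
 */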
static bool can_cow_file_range_inline(struct btrfs_inode *inode,
				      u64 offset, u64 size,
				      size_t compressed_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 data_len = (compressed_size ?: size);

	/* Inline extents must start at offset 0. */
	if (offset != 0)
		return false;

	/*
	 * Even for bs > ps cases, cow_file_range_inline() can only accept a
	 * single folio.
	 *
	 * This can be problematic and cause access beyond the page boundary
	 * if a page sized folio is passed into that function, and encoded
	 * writes do exactly that.
	 * So limit the inline extent size to PAGE_SIZE here.
	 */
	if (size > PAGE_SIZE || compressed_size > PAGE_SIZE)
		return false;

	/* Inline extents are limited to sectorsize. */
	if (size > fs_info->sectorsize)
		return false;

	/* We do not allow a non-compressed extent to be as large as block size. */
	if (data_len >= fs_info->sectorsize)
		return false;

	/* We cannot exceed the maximum inline data size. */
	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return false;

	/* We cannot exceed the user specified max_inline size. */
	if (data_len > fs_info->max_inline)
		return false;

	/* Inline extents must be the entirety of the file. */
	if (size < i_size_read(&inode->vfs_inode))
		return false;

	/* Encrypted files cannot be inlined. */
	if (IS_ENCRYPTED(&inode->vfs_inode))
		return false;

	return true;
}

/*
 * conditionally insert an inline extent into the file. This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 *
 * If being used directly, you must have already checked we're allowed to cow
 * the range by getting true from can_cow_file_range_inline().
 */
static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
					    u64 size, size_t compressed_size,
					    int compress_type,
					    struct folio *compressed_folio,
					    bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_folio, update_i_size);
	if (unlikely(ret && ret != -ENOSPC)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, inode);
	if (unlikely(ret && ret != -ENOSPC)) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space: an inline extent doesn't
	 * count as a data extent, so free the reservation directly here.
	 * At reserve time the space is always aligned to the sector size, so
	 * just free one sector here.
	 *
	 * If we fall back to non-inline (ret == 1) due to -ENOSPC, then we
	 * need to keep the data reservation.
	 */
	if (ret <= 0)
		btrfs_qgroup_free_data(inode, NULL, 0, fs_info->sectorsize, NULL);
	btrfs_free_path(path);
	if (trans)
		btrfs_end_transaction(trans);
	return ret;
}

static noinline int cow_file_range_inline(struct btrfs_inode *inode,
					  struct folio *locked_folio,
					  u64 offset, u64 end,
					  size_t compressed_size,
					  int compress_type,
					  struct folio *compressed_folio,
					  bool update_i_size)
{
	struct extent_state *cached = NULL;
	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
	int ret;

	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
		return 1;

	btrfs_lock_extent(&inode->io_tree, offset, end, &cached);
	ret = __cow_file_range_inline(inode, size, compressed_size,
				      compress_type, compressed_folio,
				      update_i_size);
	if (ret > 0) {
		btrfs_unlock_extent(&inode->io_tree, offset, end, &cached);
		return ret;
	}

	/*
	 * In the successful case (ret == 0 here), cow_file_range will return 1.
	 *
	 * Quite a bit further up the callstack in extent_writepage(), ret == 1
	 * is treated as a short circuited success and does not unlock the folio,
	 * so we must do it here.
	 *
	 * In the failure case, the locked_folio does get unlocked by
	 * btrfs_folio_end_all_writers, which asserts that it is still locked
	 * at that point, so we must *not* unlock it here.
	 *
	 * The other two callsites in compress_file_range do not have a
	 * locked_folio, so they are not relevant to this logic.
	 */
	if (ret == 0)
		locked_folio = NULL;

	extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
				     clear_flags, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
	return ret;
}

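/*
 * State for async compressed writeback: each async_extent tracks one
 * (possibly compressed) range, an async_chunk covers a contiguous part of the
 * delalloc range handed to run_delalloc_compressed(), and an async_cow owns
 * all the chunks of one such range.
 */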
struct async_extent {
	u64 start;
	u64 ram_size;
	struct compressed_bio *cb;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct folio *locked_folio;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};

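/*
 * Queue one range (with its compressed bio, or NULL if uncompressed) on the
 * chunk's extent list for later submission by submit_compressed_extents().
 */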
static int add_async_extent(struct async_chunk *cow, u64 start, u64 ram_size,
			    struct compressed_bio *cb)
{
	struct async_extent *async_extent;

	async_extent = kmalloc_obj(*async_extent, GFP_NOFS);
	if (!async_extent)
		return -ENOMEM;
	ASSERT(ram_size < U32_MAX);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->cb = cb;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		DEBUG_WARN("BTRFS: unexpected compression for ino %llu", btrfs_ino(inode));
		return 0;
	}

	/*
	 * If the delalloc range is only one fs block and cannot be inlined,
	 * don't even bother trying compression, as there will be no space
	 * saving and we will always fall back to a regular write later.
	 */
	if (start != 0 && end + 1 - start <= fs_info->sectorsize)
		return 0;
	/* Defrag ioctl takes precedence over mount options and properties. */
	if (inode->defrag_compress == BTRFS_DEFRAG_DONT_COMPRESS)
		return 0;
	if (BTRFS_COMPRESS_NONE < inode->defrag_compress &&
	    inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES)
		return 1;
	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(inode, start, end);
	return 0;
}

static inline void inode_should_defrag(struct btrfs_inode *inode,
				       u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(inode, small_write);
}

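/*
 * Clear the dirty bits of all folios in [start, end], so that mmap writes
 * cannot change the contents while the range is being compressed. Returns the
 * first filemap_get_folio() error encountered, 0 otherwise.
 */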
static int extent_range_clear_dirty_for_io(struct btrfs_inode *inode, u64 start, u64 end)
{
	const pgoff_t end_index = end >> PAGE_SHIFT;
	struct folio *folio;
	int ret = 0;

	for (pgoff_t index = start >> PAGE_SHIFT; index <= end_index; index++) {
		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
		if (IS_ERR(folio)) {
			if (!ret)
				ret = PTR_ERR(folio);
			continue;
		}
		btrfs_folio_clamp_clear_dirty(inode->root->fs_info, folio, start,
					      end + 1 - start);
		folio_put(folio);
	}
	return ret;
}

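/* Return the folio that holds the last byte currently in the compressed bio. */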
static struct folio *compressed_bio_last_folio(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	struct bio_vec *bvec;
	phys_addr_t paddr;

	/*
	 * Make sure all folios have the same min_folio_size.
	 *
	 * Otherwise we cannot simply use offset_in_folio(folio, bi_size) to
	 * calculate the end of the last folio.
	 */
	if (IS_ENABLED(CONFIG_BTRFS_ASSERT)) {
		struct btrfs_fs_info *fs_info = cb_to_fs_info(cb);
		const u32 min_folio_size = btrfs_min_folio_size(fs_info);
		struct folio_iter fi;

		bio_for_each_folio_all(fi, bio)
			ASSERT(folio_size(fi.folio) == min_folio_size);
	}

	/* The bio must not be empty. */
	ASSERT(bio->bi_vcnt);

	bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
	paddr = page_to_phys(bvec->bv_page) + bvec->bv_offset + bvec->bv_len - 1;
	return page_folio(phys_to_page(paddr));
}

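/*
 * Zero the part of the last folio that lies beyond the end of the compressed
 * data, as it may be sent down to disk as padding.
 */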
static void zero_last_folio(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	struct folio *last_folio = compressed_bio_last_folio(cb);
	const u32 bio_size = bio->bi_iter.bi_size;
	const u32 foffset = offset_in_folio(last_folio, bio_size);

	folio_zero_range(last_folio, foffset, folio_size(last_folio) - foffset);
}

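/*
 * Grow the compressed bio so that its size is aligned to @blocksize, using
 * the (already zeroed) tail of its last folio.
 */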
static void round_up_last_block(struct compressed_bio *cb, u32 blocksize)
{
	struct bio *bio = &cb->bbio.bio;
	struct folio *last_folio = compressed_bio_last_folio(cb);
	const u32 bio_size = bio->bi_iter.bi_size;
	const u32 foffset = offset_in_folio(last_folio, bio_size);
	bool ret;

	if (IS_ALIGNED(bio_size, blocksize))
		return;

	ret = bio_add_folio(bio, last_folio, round_up(foffset, blocksize) - foffset, foffset);
	/* The remaining part should be merged and thus never fail. */
	ASSERT(ret);
}

/*
 * Work queue callback to start compression on a file and its pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus. The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes. This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct compressed_bio *cb = NULL;
	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	u32 cur_len;
	int ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int loff;
	int compress_type = fs_info->compress_type;
	int compress_level = fs_info->compress_level;

	if (btrfs_is_shutdown(fs_info))
		goto cleanup_and_bail_uncompressed;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	ret = extent_range_clear_dirty_for_io(inode, start, end);

	/*
	 * All the folios should have been locked thus no failure.
	 *
	 * And even if some folios are missing, btrfs_compress_bio()
	 * would handle them correctly, so here just do an ASSERT() check for
	 * early logic errors.
	 */
	ASSERT(ret == 0);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	total_in = 0;
	cur_len = min(end + 1 - start, BTRFS_MAX_UNCOMPRESSED);
	ret = 0;
	cb = NULL;

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS. This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	if (0 < inode->defrag_compress && inode->defrag_compress < BTRFS_NR_COMPRESS_TYPES) {
		compress_type = inode->defrag_compress;
		compress_level = inode->defrag_compress_level;
	} else if (inode->prop_compress) {
		compress_type = inode->prop_compress;
	}

	/* Compression level is applied here. */
	cb = btrfs_compress_bio(inode, start, cur_len, compress_type,
				compress_level, async_chunk->write_flags);
	if (IS_ERR(cb)) {
		cb = NULL;
		goto mark_incompressible;
	}

	total_compressed = cb->bbio.bio.bi_iter.bi_size;
	total_in = cur_len;

	/*
	 * Zero the tail end of the last folio, as we might be sending it down
	 * to disk.
	 */
	loff = (total_compressed & (min_folio_size - 1));
	if (loff)
		zero_last_folio(cb);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (total_in < actual_end)
		ret = cow_file_range_inline(inode, NULL, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
	else
		ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
					    compress_type,
					    bio_first_folio_all(&cb->bbio.bio), false);
	if (ret <= 0) {
		cleanup_compressed_bio(cb);
		if (ret < 0)
			mapping_set_error(mapping, -EIO);
		return;
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);
	round_up_last_block(cb, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	ret = add_async_extent(async_chunk, start, total_in, cb);
	BUG_ON(ret);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	ret = add_async_extent(async_chunk, start, end - start + 1, NULL);
	BUG_ON(ret);
	if (cb)
		cleanup_compressed_bio(cb);
}

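/*
 * Fall back to plain COW writeback for an async extent that was not (or could
 * not be) compressed.
 */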
static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct folio *locked_folio)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_folio, start, end,
			       &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		if (locked_folio)
			btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
					     start, async_extent->ram_size);
		btrfs_err_rl(inode->root->fs_info,
			     "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
			     __func__, btrfs_root_id(inode->root),
			     btrfs_ino(inode), start, async_extent->ram_size, ret);
	}
}

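/*
 * Allocate disk space for one async extent, create its ordered extent and
 * submit the compressed bio, falling back to uncompressed writeback if no
 * contiguous space can be reserved for the compressed size.
 */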
static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	struct btrfs_key ins;
	struct folio *locked_folio = NULL;
	struct extent_state *cached = NULL;
	struct extent_map *em;
	int ret = 0;
	u32 compressed_size;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_folio is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_folio) {
		u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
		u64 locked_folio_end = locked_folio_start +
			folio_size(async_chunk->locked_folio) - 1;

		if (!(start >= locked_folio_end || end <= locked_folio_start))
			locked_folio = async_chunk->locked_folio;
	}

	if (!async_extent->cb) {
		submit_uncompressed_range(inode, async_extent, locked_folio);
		goto done;
	}

	compressed_size = async_extent->cb->bbio.bio.bi_iter.bi_size;
	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   compressed_size, compressed_size,
				   0, *alloc_hint, &ins, true, true);
	if (ret) {
		/*
		 * We can't reserve contiguous space for the compressed size.
		 * Unlikely, but it's possible that we could have enough
		 * non-contiguous space for the uncompressed size instead. So
		 * fall back to uncompressed.
		 */
		submit_uncompressed_range(inode, async_extent, locked_folio);
		cleanup_compressed_bio(async_extent->cb);
		async_extent->cb = NULL;
		goto done;
	}

	btrfs_lock_extent(io_tree, start, end, &cached);

	/* Here we're doing allocation and writeback of the compressed pages */
	file_extent.disk_bytenr = ins.objectid;
	file_extent.disk_num_bytes = ins.offset;
	file_extent.ram_bytes = async_extent->ram_size;
	file_extent.num_bytes = async_extent->ram_size;
	file_extent.offset = 0;
	file_extent.compression = async_extent->cb->compress_type;

	async_extent->cb->bbio.bio.bi_iter.bi_sector = ins.objectid >> SECTOR_SHIFT;

	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	btrfs_free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
					     1U << BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	async_extent->cb->bbio.ordered = ordered;
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_bbio(&async_extent->cb->bbio, 0);
	async_extent->cb = NULL;

	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	if (async_extent->cb)
		cleanup_compressed_bio(async_extent->cb);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    btrfs_root_id(root), btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

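/*
 * Derive an allocator hint from the extent maps around @start, so that new
 * allocations land near the inode's existing extents.
 */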
u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				     u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = btrfs_search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint. If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
			btrfs_free_extent_map(em);
			em = btrfs_search_extent_mapping(em_tree, 0, 0);
			if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
				alloc_hint = btrfs_extent_map_block_start(em);
			if (em)
				btrfs_free_extent_map(em);
		} else {
			alloc_hint = btrfs_extent_map_block_start(em);
			btrfs_free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * Handle COW for one range.
 *
 * @ins:		The key representing the allocated range.
 * @file_offset:	The file offset of the COW range.
 * @num_bytes:		The expected length of the COW range.
 *			The actually allocated length can be smaller than it.
 * @min_alloc_size:	The minimal extent size.
 * @alloc_hint:		The hint for the extent allocator.
 * @ret_alloc_size:	The length of the COW range handled by this function.
 *
 * Return 0 if everything is fine, with @ret_alloc_size updated. The range is
 * still locked, and the caller should unlock it after everything is done or
 * for error handling.
 *
 * Return <0 for error, with @ret_alloc_size updated to tell where the extra
 * cleanup should happen. The range [file_offset, file_offset + ret_alloc_size)
 * will be cleaned up by this function.
 */
static int cow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
			 struct btrfs_key *ins, struct extent_state **cached,
			 u64 file_offset, u32 num_bytes, u32 min_alloc_size,
			 u64 alloc_hint, u32 *ret_alloc_size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	struct extent_map *em;
	u32 cur_len = 0;
	u64 cur_end;
	int ret;

	ret = btrfs_reserve_extent(root, num_bytes, num_bytes, min_alloc_size,
				   0, alloc_hint, ins, true, true);
	if (ret < 0) {
		*ret_alloc_size = cur_len;
		return ret;
	}

	cur_len = ins->offset;
	cur_end = file_offset + cur_len - 1;

	file_extent.disk_bytenr = ins->objectid;
	file_extent.disk_num_bytes = ins->offset;
	file_extent.num_bytes = ins->offset;
	file_extent.ram_bytes = ins->offset;
	file_extent.offset = 0;
	file_extent.compression = BTRFS_COMPRESS_NONE;

	/*
	 * The locked range will be released either during error clean up
	 * (inside this function or by the caller for previously successful
	 * ranges) or after the whole range is finished.
	 */
	btrfs_lock_extent(&inode->io_tree, file_offset, cur_end, cached);
	em = btrfs_create_io_em(inode, file_offset, &file_extent, BTRFS_ORDERED_REGULAR);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto free_reserved;
	}
	btrfs_free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, file_offset, &file_extent,
					     1U << BTRFS_ORDERED_REGULAR);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, file_offset, cur_end, false);
		ret = PTR_ERR(ordered);
		goto free_reserved;
	}

	if (btrfs_is_data_reloc_root(root)) {
		ret = btrfs_reloc_clone_csums(ordered);

		/*
		 * Only drop the cache here, and process as normal.
		 *
		 * We must not allow extent_clear_unlock_delalloc() at the
		 * free_reserved label to free the metadata of this ordered
		 * extent, as it should be freed by btrfs_finish_ordered_io().
		 *
		 * So we must continue until @start is increased to skip the
		 * current ordered extent.
		 */
		if (ret)
			btrfs_drop_extent_map_range(inode, file_offset,
						    cur_end, false);
	}
	btrfs_put_ordered_extent(ordered);
	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
	/*
	 * Error handling for btrfs_reloc_clone_csums().
	 *
	 * Treat the range as finished, thus only clear EXTENT_LOCKED | EXTENT_DELALLOC.
	 * The accounting will be done by the ordered extents.
	 */
	if (unlikely(ret < 0)) {
		btrfs_cleanup_ordered_extents(inode, file_offset, cur_len);
		extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     PAGE_UNLOCK | PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	}
	*ret_alloc_size = cur_len;
	return ret;

free_reserved:
	extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	btrfs_qgroup_free_data(inode, NULL, file_offset, cur_len, NULL);
	btrfs_dec_block_group_reservations(fs_info, ins->objectid);
	btrfs_free_reserved_extent(fs_info, ins->objectid, ins->offset, true);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	*ret_alloc_size = cur_len;
	/*
	 * We should not return -EAGAIN here, as it's a special return code
	 * from btrfs_reserve_extent() for zoned filesystems that the caller
	 * must catch.
	 */
	ASSERT(ret != -EAGAIN);
	return ret;
}

/*
 * When extent_io.c finds a delayed allocation range in the file, the
 * callbacks end up in this code. The basic idea is to allocate extents on
 * disk for the range and create ordered data structs in ram to track those
 * extents.
 *
 * locked_folio is the folio that writepage had locked already. We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all folios except @locked_folio.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all folios including locked_folio and starts I/O on them.
 * (In reality inline extents are limited to a single block, so locked_folio is
 * the only folio handled anyway).
 *
 * When this function succeeds and creates a normal extent, the folio locking
 * status depends on the passed in flags:
 *
 * - If COW_FILE_RANGE_KEEP_LOCKED flag is set, all folios are kept locked.
 * - Else all folios except for @locked_folio are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are cleaned up.
 */
cow_file_range(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,u64 * done_offset,unsigned long flags)1439 static noinline int cow_file_range(struct btrfs_inode *inode,
1440 struct folio *locked_folio, u64 start,
1441 u64 end, u64 *done_offset,
1442 unsigned long flags)
1443 {
1444 struct btrfs_root *root = inode->root;
1445 struct btrfs_fs_info *fs_info = root->fs_info;
1446 struct extent_state *cached = NULL;
1447 u64 alloc_hint = 0;
1448 u64 orig_start = start;
1449 u64 num_bytes;
1450 u32 min_alloc_size;
1451 u32 blocksize = fs_info->sectorsize;
1452 u32 cur_alloc_size = 0;
1453 struct btrfs_key ins;
1454 unsigned clear_bits;
1455 unsigned long page_ops;
1456 int ret = 0;
1457
1458 if (btrfs_is_shutdown(fs_info)) {
1459 ret = -EIO;
1460 goto out_unlock;
1461 }
1462
1463 if (btrfs_is_free_space_inode(inode)) {
1464 ret = -EINVAL;
1465 goto out_unlock;
1466 }
1467
1468 num_bytes = ALIGN(end - start + 1, blocksize);
1469 num_bytes = max(blocksize, num_bytes);
1470 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));
1471
1472 inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1473
1474 if (!(flags & COW_FILE_RANGE_NO_INLINE)) {
1475 /* lets try to make an inline extent */
1476 ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
1477 BTRFS_COMPRESS_NONE, NULL, false);
1478 if (ret <= 0) {
1479 /*
1480 * We succeeded, return 1 so the caller knows we're done
1481 * with this page and already handled the IO.
1482 *
1483 * If there was an error then cow_file_range_inline() has
1484 * already done the cleanup.
1485 */
1486 if (ret == 0)
1487 ret = 1;
1488 goto done;
1489 }
1490 }
1491
1492 alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
1493
1494 /*
1495 * We're not doing compressed IO, don't unlock the first page (which
1496 * the caller expects to stay locked), don't clear any dirty bits and
1497 * don't set any writeback bits.
1498 *
1499 * Do set the Ordered (Private2) bit so we know this page was properly
1500 * setup for writepage.
1501 */
1502 page_ops = ((flags & COW_FILE_RANGE_KEEP_LOCKED) ? 0 : PAGE_UNLOCK);
1503 page_ops |= PAGE_SET_ORDERED;
1504
1505 /*
1506 * Relocation relies on the relocated extents to have exactly the same
1507 * size as the original extents. Normally writeback for relocation data
1508 * extents follows a NOCOW path because relocation preallocates the
1509 * extents. However, due to an operation such as scrub turning a block
1510 * group to RO mode, it may fallback to COW mode, so we must make sure
1511 * an extent allocated during COW has exactly the requested size and can
1512 * not be split into smaller extents, otherwise relocation breaks and
1513 * fails during the stage where it updates the bytenr of file extent
1514 * items.
1515 */
1516 if (btrfs_is_data_reloc_root(root))
1517 min_alloc_size = num_bytes;
1518 else
1519 min_alloc_size = fs_info->sectorsize;
1520
1521 while (num_bytes > 0) {
1522 ret = cow_one_range(inode, locked_folio, &ins, &cached, start,
1523 num_bytes, min_alloc_size, alloc_hint, &cur_alloc_size);
1524
1525 if (ret == -EAGAIN) {
1526 /*
1527 * cow_one_range() only returns -EAGAIN for zoned
1528 * file systems (from btrfs_reserve_extent()), which
1529 * is an indication that there are
1530 * no active zones to allocate from at the moment.
1531 *
1532 * If this is the first loop iteration, wait for at
1533 * least one zone to finish before retrying the
1534 * allocation. Otherwise ask the caller to write out
1535 * the already allocated blocks before coming back to
1536 * us, or return -ENOSPC if it can't handle retries.
1537 */
1538 ASSERT(btrfs_is_zoned(fs_info));
1539 if (start == orig_start) {
1540 wait_on_bit_io(&inode->root->fs_info->flags,
1541 BTRFS_FS_NEED_ZONE_FINISH,
1542 TASK_UNINTERRUPTIBLE);
1543 continue;
1544 }
1545 if (done_offset) {
1546 /*
1547 * Move @end to the end of the processed range,
1548 * and exit the loop to unlock the processed extents.
1549 */
1550 end = start - 1;
1551 ret = 0;
1552 break;
1553 }
1554 ret = -ENOSPC;
1555 }
1556 if (ret < 0)
1557 goto out_unlock;
1558
1559 /* We should not allocate an extent larger than requested.*/
1560 ASSERT(cur_alloc_size <= num_bytes);
1561
1562 num_bytes -= cur_alloc_size;
1563 alloc_hint = ins.objectid + ins.offset;
1564 start += cur_alloc_size;
1565 cur_alloc_size = 0;
1566 }
1567 extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
1568 EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
1569 done:
1570 if (done_offset)
1571 *done_offset = end;
1572 return ret;
1573
1574 out_unlock:
1575 /*
1576 * Now, we have three regions to clean up:
1577 *
1578 * |-------(1)----|---(2)---|-------------(3)----------|
1579 * `- orig_start `- start `- start + cur_alloc_size `- end
1580 *
1581 * We process each region below.
1582 */
1583
1584 /*
1585 * For the range (1). We have already instantiated the ordered extents
1586 * for this region, thus we need to cleanup those ordered extents.
1587 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
1588 * are also handled by the ordered extents cleanup.
1589 *
1590 * So here we only clear EXTENT_LOCKED and EXTENT_DELALLOC flag, and
1591 * finish the writeback of the involved folios, which will be never submitted.
1592 */
1593 if (orig_start < start) {
1594 clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
1595 page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1596
1597 if (!locked_folio)
1598 mapping_set_error(inode->vfs_inode.i_mapping, ret);
1599
1600 btrfs_cleanup_ordered_extents(inode, orig_start, start - orig_start);
1601 extent_clear_unlock_delalloc(inode, orig_start, start - 1,
1602 locked_folio, NULL, clear_bits, page_ops);
1603 }
1604
1605 clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
1606 EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
1607 page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
1608
1609 /*
1610 * For the range (2) the error handling is done by cow_one_range() itself.
1611 * Nothing needs to be done.
1612 *
1613 * For the range (3). We never touched the region. In addition to the
1614 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
1615 * space_info's bytes_may_use counter, reserved in
1616 * btrfs_check_data_free_space().
1617 */
1618 if (start + cur_alloc_size < end) {
1619 clear_bits |= EXTENT_CLEAR_DATA_RESV;
1620 extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
1621 end, locked_folio,
1622 &cached, clear_bits, page_ops);
1623 btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
1624 end - start - cur_alloc_size + 1, NULL);
1625 }
1626 btrfs_err(fs_info,
1627 "%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu cur_alloc_size=%u: %d",
1628 __func__, btrfs_root_id(inode->root),
1629 btrfs_ino(inode), orig_start, end + 1 - orig_start,
1630 start, cur_alloc_size, ret);
1631 return ret;
1632 }
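/*
 * Illustrative userspace sketch (not from the kernel source, guarded by
 * #if 0 so it never builds): the boundary arithmetic behind the three
 * cleanup regions in the error path above. The helper and struct names
 * are hypothetical, used only to make the region computation explicit.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct cleanup_regions {
	/* Region (1): ordered extents already created, needs OE cleanup. */
	uint64_t oe_start, oe_end;
	/* Region (2): cleaned up by cow_one_range() itself, skip. */
	uint64_t skip_start, skip_end;
	/* Region (3): untouched, also release the reserved data space. */
	uint64_t untouched_start, untouched_end;
};

static void compute_cleanup_regions(uint64_t orig_start, uint64_t start,
				    uint64_t cur_alloc_size, uint64_t end,
				    struct cleanup_regions *r)
{
	r->oe_start = orig_start;
	r->oe_end = start - 1;		/* empty if start == orig_start */
	r->skip_start = start;
	r->skip_end = start + cur_alloc_size - 1;
	r->untouched_start = start + cur_alloc_size;
	r->untouched_end = end;		/* empty if skip_end >= end */
}

int main(void)
{
	struct cleanup_regions r;

	/* A 1MiB range which failed after allocating 256KiB at offset 512KiB. */
	compute_cleanup_regions(0, 512 << 10, 256 << 10, (1 << 20) - 1, &r);
	printf("OE cleanup: [%llu, %llu]\n",
	       (unsigned long long)r.oe_start, (unsigned long long)r.oe_end);
	printf("untouched:  [%llu, %llu]\n",
	       (unsigned long long)r.untouched_start,
	       (unsigned long long)r.untouched_end);
	return 0;
}
#endif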
1633
1634 /*
1635 * Phase two of compressed writeback. This is the ordered portion of the code,
1636 * which only gets called in the order the work was queued. We walk all the
1637 * async extents created by compress_file_range and send them down to the disk.
1638 *
1639 * If called with @do_free == true then it'll try to finish the work and free
1640 * the work struct eventually.
1641 */
1642 static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
1643 {
1644 struct async_chunk *async_chunk = container_of(work, struct async_chunk,
1645 work);
1646 struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
1647 struct async_extent *async_extent;
1648 unsigned long nr_pages;
1649 u64 alloc_hint = 0;
1650
1651 if (do_free) {
1652 struct async_cow *async_cow;
1653
1654 btrfs_add_delayed_iput(async_chunk->inode);
1655 if (async_chunk->blkcg_css)
1656 css_put(async_chunk->blkcg_css);
1657
1658 async_cow = async_chunk->async_cow;
1659 if (atomic_dec_and_test(&async_cow->num_chunks))
1660 kvfree(async_cow);
1661 return;
1662 }
1663
1664 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1665 PAGE_SHIFT;
1666
1667 while (!list_empty(&async_chunk->extents)) {
1668 async_extent = list_first_entry(&async_chunk->extents,
1669 struct async_extent, list);
1670 list_del(&async_extent->list);
1671 submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
1672 }
1673
1674 /* atomic_sub_return implies a barrier */
1675 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
1676 5 * SZ_1M)
1677 cond_wake_up_nomb(&fs_info->async_submit_wait);
1678 }
1679
1680 static bool run_delalloc_compressed(struct btrfs_inode *inode,
1681 struct folio *locked_folio, u64 start,
1682 u64 end, struct writeback_control *wbc)
1683 {
1684 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1685 struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1686 struct async_cow *ctx;
1687 struct async_chunk *async_chunk;
1688 unsigned long nr_pages;
1689 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1690 int i;
1691 unsigned nofs_flag;
1692 const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1693
1694 nofs_flag = memalloc_nofs_save();
1695 ctx = kvmalloc_flex(*ctx, chunks, num_chunks);
1696 memalloc_nofs_restore(nofs_flag);
1697 if (!ctx)
1698 return false;
1699
1700 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
1701
1702 async_chunk = ctx->chunks;
1703 atomic_set(&ctx->num_chunks, num_chunks);
1704
1705 for (i = 0; i < num_chunks; i++) {
1706 u64 cur_end = min(end, start + SZ_512K - 1);
1707
1708 /*
1709 * igrab is called higher up in the call chain, take only the
1710 * lightweight reference for the callback lifetime
1711 */
1712 ihold(&inode->vfs_inode);
1713 async_chunk[i].async_cow = ctx;
1714 async_chunk[i].inode = inode;
1715 async_chunk[i].start = start;
1716 async_chunk[i].end = cur_end;
1717 async_chunk[i].write_flags = write_flags;
1718 INIT_LIST_HEAD(&async_chunk[i].extents);
1719
1720 /*
1721 * The locked_folio comes all the way from writepage and it's
1722 * the original folio we were actually given. As we spread
1723 * this large delalloc region across multiple async_chunk
1724 * structs, only the first struct needs a pointer to
1725 * locked_folio.
1726 *
1727 * This way we don't need racy decisions about who is supposed
1728 * to unlock it.
1729 */
1730 if (locked_folio) {
1731 /*
1732 * Depending on the compressibility, the pages might or
1733 * might not go through async. We want all of them to
1734 * be accounted against wbc once. Let's do it here
1735 * before the paths diverge. wbc accounting is used
1736 * only for foreign writeback detection and doesn't
1737 * need full accuracy. Just account the whole thing
1738 * against the first page.
1739 */
1740 wbc_account_cgroup_owner(wbc, locked_folio,
1741 cur_end - start);
1742 async_chunk[i].locked_folio = locked_folio;
1743 locked_folio = NULL;
1744 } else {
1745 async_chunk[i].locked_folio = NULL;
1746 }
1747
1748 if (blkcg_css != blkcg_root_css) {
1749 css_get(blkcg_css);
1750 async_chunk[i].blkcg_css = blkcg_css;
1751 async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
1752 } else {
1753 async_chunk[i].blkcg_css = NULL;
1754 }
1755
1756 btrfs_init_work(&async_chunk[i].work, compress_file_range,
1757 submit_compressed_extents);
1758
1759 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1760 atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1761
1762 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1763
1764 start = cur_end + 1;
1765 }
1766 return true;
1767 }
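/*
 * Illustrative userspace sketch (not from the kernel source, guarded by
 * #if 0): how run_delalloc_compressed() splits the inclusive range
 * [start, end] into SZ_512K-sized async chunks. SZ_512K and DIV_ROUND_UP
 * are redefined locally so the example is self-contained.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define SZ_512K (512 * 1024)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Three full chunks plus a 4KiB tail. */
	uint64_t start = 0, end = (uint64_t)3 * SZ_512K + 4096 - 1;
	uint64_t num_chunks = DIV_ROUND_UP(end - start, SZ_512K);

	for (uint64_t i = 0; i < num_chunks; i++) {
		uint64_t cur_end = end < start + SZ_512K - 1 ?
				   end : start + SZ_512K - 1;

		printf("chunk %llu: [%llu, %llu]\n",
		       (unsigned long long)i,
		       (unsigned long long)start,
		       (unsigned long long)cur_end);
		start = cur_end + 1;
	}
	return 0;
}
#endif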
1768
1769 /*
1770 * Run the delalloc range from start to end, and write back any dirty pages
1771 * covered by the range.
1772 */
1773 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
1774 struct folio *locked_folio, u64 start,
1775 u64 end, struct writeback_control *wbc,
1776 bool pages_dirty)
1777 {
1778 u64 done_offset = end;
1779 int ret;
1780
1781 while (start <= end) {
1782 ret = cow_file_range(inode, locked_folio, start, end,
1783 &done_offset, COW_FILE_RANGE_KEEP_LOCKED);
1784 if (ret)
1785 return ret;
1786 extent_write_locked_range(&inode->vfs_inode, locked_folio,
1787 start, done_offset, wbc, pages_dirty);
1788 start = done_offset + 1;
1789 }
1790
1791 return 1;
1792 }
1793
1794 static int fallback_to_cow(struct btrfs_inode *inode,
1795 struct folio *locked_folio, const u64 start,
1796 const u64 end)
1797 {
1798 const bool is_space_ino = btrfs_is_free_space_inode(inode);
1799 const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1800 const u64 range_bytes = end + 1 - start;
1801 struct extent_io_tree *io_tree = &inode->io_tree;
1802 struct extent_state *cached_state = NULL;
1803 u64 range_start = start;
1804 u64 count;
1805 int ret;
1806
1807 /*
1808 * If EXTENT_NORESERVE is set it means that when the buffered write was
1809 * made we did not have enough available data space and therefore did not
1810 * reserve data space for it, since we thought we could do NOCOW for the
1811 * respective file range (either there is a prealloc extent or the inode
1812 * has the NOCOW bit set).
1813 *
1814 * However when we need to fallback to COW mode (because for example the
1815 * block group for the corresponding extent was turned to RO mode by a
1816 * scrub or relocation) we need to do the following:
1817 *
1818 * 1) We increment the bytes_may_use counter of the data space info.
1819 * If COW succeeds, it allocates a new data extent and after doing
1820 * that it decrements the space info's bytes_may_use counter and
1821 * increments its bytes_reserved counter by the same amount (we do
1822 * this at btrfs_add_reserved_bytes()). So we need to increment the
1823 * bytes_may_use counter to compensate (when space is reserved at
1824 * buffered write time, the bytes_may_use counter is incremented);
1825 *
1826 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1827 * that if the COW path fails for any reason, it decrements (through
1828 * extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1829 * data space info, which we incremented in the step above.
1830 *
1831 * If we need to fallback to cow and the inode corresponds to a free
1832 * space cache inode or an inode of the data relocation tree, we must
1833 * also increment bytes_may_use of the data space_info for the same
1834 * reason. Space caches and relocated data extents always get a prealloc
1835 * extent for them, however scrub or balance may have set the block
1836 * group that contains that extent to RO mode and therefore force COW
1837 * when starting writeback.
1838 */
1839 btrfs_lock_extent(io_tree, start, end, &cached_state);
1840 count = btrfs_count_range_bits(io_tree, &range_start, end, range_bytes,
1841 EXTENT_NORESERVE, 0, NULL);
1842 if (count > 0 || is_space_ino || is_reloc_ino) {
1843 u64 bytes = count;
1844 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1845 struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1846
1847 if (is_space_ino || is_reloc_ino)
1848 bytes = range_bytes;
1849
1850 spin_lock(&sinfo->lock);
1851 btrfs_space_info_update_bytes_may_use(sinfo, bytes);
1852 spin_unlock(&sinfo->lock);
1853
1854 if (count > 0)
1855 btrfs_clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1856 &cached_state);
1857 }
1858 btrfs_unlock_extent(io_tree, start, end, &cached_state);
1859
1860 /*
1861 * Don't try to create inline extents, as a mix of inline extent that
1862 * is written out and unlocked directly and a normal NOCOW extent
1863 * doesn't work.
1864 *
1865 * Also note that we do not unlock the folios after a successful run.
1866 * The folios will be unlocked after everything is finished, or by the error handling.
1867 *
1868 * This ensures the error handling won't need to clear dirty/ordered flags without
1869 * a locked folio, which could race with writeback.
1870 */
1871 ret = cow_file_range(inode, locked_folio, start, end, NULL,
1872 COW_FILE_RANGE_NO_INLINE | COW_FILE_RANGE_KEEP_LOCKED);
1873 ASSERT(ret != 1);
1874 return ret;
1875 }
1876
1877 struct can_nocow_file_extent_args {
1878 /* Input fields. */
1879
1880 /* Start file offset of the range we want to NOCOW. */
1881 u64 start;
1882 /* End file offset (inclusive) of the range we want to NOCOW. */
1883 u64 end;
1884 bool writeback_path;
1885 /*
1886 * Free the path passed to can_nocow_file_extent() once it's not needed
1887 * anymore.
1888 */
1889 bool free_path;
1890
1891 /*
1892 * Output fields. Only set when can_nocow_file_extent() returns 1.
1893 * The expected file extent for the NOCOW write.
1894 */
1895 struct btrfs_file_extent file_extent;
1896 };
1897
1898 /*
1899 * Check if we can NOCOW the file extent that the path points to.
1900 * This function may return with the path released, so the caller should check
1901 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1902 *
1903 * Returns: < 0 on error
1904 * 0 if we can not NOCOW
1905 * 1 if we can NOCOW
1906 */
1907 static int can_nocow_file_extent(struct btrfs_path *path,
1908 struct btrfs_key *key,
1909 struct btrfs_inode *inode,
1910 struct can_nocow_file_extent_args *args)
1911 {
1912 const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1913 struct extent_buffer *leaf = path->nodes[0];
1914 struct btrfs_root *root = inode->root;
1915 struct btrfs_file_extent_item *fi;
1916 struct btrfs_root *csum_root;
1917 u64 io_start;
1918 u64 extent_end;
1919 u8 extent_type;
1920 int can_nocow = 0;
1921 int ret = 0;
1922 bool nowait = path->nowait;
1923
1924 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1925 extent_type = btrfs_file_extent_type(leaf, fi);
1926
1927 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1928 goto out;
1929
1930 if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1931 extent_type == BTRFS_FILE_EXTENT_REG)
1932 goto out;
1933
1934 /*
1935 * If the extent was created before the generation where the last snapshot
1936 * for its subvolume was created, then this implies the extent is shared,
1937 * hence we must COW.
1938 */
1939 if (btrfs_file_extent_generation(leaf, fi) <=
1940 btrfs_root_last_snapshot(&root->root_item))
1941 goto out;
1942
1943 /* An explicit hole, must COW. */
1944 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
1945 goto out;
1946
1947 /* Compressed/encrypted/encoded extents must be COWed. */
1948 if (btrfs_file_extent_compression(leaf, fi) ||
1949 btrfs_file_extent_encryption(leaf, fi) ||
1950 btrfs_file_extent_other_encoding(leaf, fi))
1951 goto out;
1952
1953 extent_end = btrfs_file_extent_end(path);
1954
1955 args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1956 args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1957 args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1958 args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
1959 args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);
1960
1961 /*
1962 * The following checks can be expensive, as they need to take other
1963 * locks and do btree or rbtree searches, so release the path to avoid
1964 * blocking other tasks for too long.
1965 */
1966 btrfs_release_path(path);
1967
1968 ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
1969 args->file_extent.disk_bytenr, path);
1970 WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1971 if (ret != 0)
1972 goto out;
1973
1974 if (args->free_path) {
1975 /*
1976 * We don't need the path anymore, plus through the
1977 * btrfs_lookup_csums_list() call below we will end up allocating
1978 * another path. So free the path to avoid unnecessary extra
1979 * memory usage.
1980 */
1981 btrfs_free_path(path);
1982 path = NULL;
1983 }
1984
1985 /* If there are pending snapshots for this root, we must COW. */
1986 if (args->writeback_path && !is_freespace_inode &&
1987 atomic_read(&root->snapshot_force_cow))
1988 goto out;
1989
1990 args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
1991 args->file_extent.offset += args->start - key->offset;
1992 io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
1993
1994 /*
1995 * Force COW if csums exist in the range. This ensures that csums for a
1996 * given extent are either valid or do not exist.
1997 */
1998
1999 csum_root = btrfs_csum_root(root->fs_info, io_start);
2000 ret = btrfs_lookup_csums_list(csum_root, io_start,
2001 io_start + args->file_extent.num_bytes - 1,
2002 NULL, nowait);
2003 WARN_ON_ONCE(ret > 0 && is_freespace_inode);
2004 if (ret != 0)
2005 goto out;
2006
2007 can_nocow = 1;
2008 out:
2009 if (args->free_path && path)
2010 btrfs_free_path(path);
2011
2012 return ret < 0 ? ret : can_nocow;
2013 }
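/*
 * Illustrative userspace sketch (not from the kernel source, guarded by
 * #if 0): how the NOCOW range is clamped against the file extent and
 * mapped to a disk byte range for the csum lookup above. All values are
 * hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* File extent item: starts at key.offset, ends at extent_end. */
	uint64_t key_offset = 64 << 10, extent_end = 256 << 10;
	uint64_t disk_bytenr = 1 << 30, fi_offset = 8 << 10;
	/* Requested NOCOW range [start, end], inclusive. */
	uint64_t start = 96 << 10, end = (512 << 10) - 1;

	uint64_t num_bytes = min_u64(end + 1, extent_end) - start;
	uint64_t offset = fi_offset + (start - key_offset);
	uint64_t io_start = disk_bytenr + offset;

	printf("num_bytes=%llu io=[%llu, %llu]\n",
	       (unsigned long long)num_bytes,
	       (unsigned long long)io_start,
	       (unsigned long long)(io_start + num_bytes - 1));
	return 0;
}
#endif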
2014
2015 static int nocow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
2016 struct extent_state **cached,
2017 struct can_nocow_file_extent_args *nocow_args,
2018 u64 file_pos, bool is_prealloc)
2019 {
2020 struct btrfs_ordered_extent *ordered;
2021 const u64 len = nocow_args->file_extent.num_bytes;
2022 const u64 end = file_pos + len - 1;
2023 int ret = 0;
2024
2025 btrfs_lock_extent(&inode->io_tree, file_pos, end, cached);
2026
2027 if (is_prealloc) {
2028 struct extent_map *em;
2029
2030 em = btrfs_create_io_em(inode, file_pos, &nocow_args->file_extent,
2031 BTRFS_ORDERED_PREALLOC);
2032 if (IS_ERR(em)) {
2033 ret = PTR_ERR(em);
2034 goto error;
2035 }
2036 btrfs_free_extent_map(em);
2037 }
2038
2039 ordered = btrfs_alloc_ordered_extent(inode, file_pos, &nocow_args->file_extent,
2040 is_prealloc
2041 ? (1U << BTRFS_ORDERED_PREALLOC)
2042 : (1U << BTRFS_ORDERED_NOCOW));
2043 if (IS_ERR(ordered)) {
2044 if (is_prealloc)
2045 btrfs_drop_extent_map_range(inode, file_pos, end, false);
2046 ret = PTR_ERR(ordered);
2047 goto error;
2048 }
2049
2050 if (btrfs_is_data_reloc_root(inode->root))
2051 /*
2052 * Errors are handled later, as we must prevent
2053 * extent_clear_unlock_delalloc() in the error handler from freeing
2054 * metadata of the created ordered extent.
2055 */
2056 ret = btrfs_reloc_clone_csums(ordered);
2057 btrfs_put_ordered_extent(ordered);
2058
2059 if (ret < 0)
2060 goto error;
2061 extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
2062 EXTENT_LOCKED | EXTENT_DELALLOC |
2063 EXTENT_CLEAR_DATA_RESV,
2064 PAGE_SET_ORDERED);
2065 return ret;
2066
2067 error:
2068 btrfs_cleanup_ordered_extents(inode, file_pos, len);
2069 extent_clear_unlock_delalloc(inode, file_pos, end, locked_folio, cached,
2070 EXTENT_LOCKED | EXTENT_DELALLOC |
2071 EXTENT_CLEAR_DATA_RESV,
2072 PAGE_UNLOCK | PAGE_START_WRITEBACK |
2073 PAGE_END_WRITEBACK);
2074 btrfs_err(inode->root->fs_info,
2075 "%s failed, root=%lld inode=%llu start=%llu len=%llu: %d",
2076 __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
2077 file_pos, len, ret);
2078 return ret;
2079 }
2080
2081 /*
2082 * Called back for NOCOW writeback. This checks for snapshots or COW copies
2083 * of the extents that exist in the file, and COWs the file as required.
2084 *
2085 * If no COW copies or snapshots exist, we write directly to the existing
2086 * blocks on disk.
2087 */
2088 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
2089 struct folio *locked_folio,
2090 const u64 start, const u64 end)
2091 {
2092 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2093 struct btrfs_root *root = inode->root;
2094 struct btrfs_path *path = NULL;
2095 u64 cow_start = (u64)-1;
2096 /*
2097 * If not 0, represents the inclusive end of the last fallback_to_cow()
2098 * range. Only used for error handling.
2099 *
2100 * The same applies to nocow_end; it avoids double cleanup of the range
2101 * already cleaned by nocow_one_range().
2102 */
2103 u64 cow_end = 0;
2104 u64 nocow_end = 0;
2105 u64 cur_offset = start;
2106 int ret;
2107 bool check_prev = true;
2108 u64 ino = btrfs_ino(inode);
2109 struct can_nocow_file_extent_args nocow_args = { 0 };
2110 /* The range that has ordered extent(s). */
2111 u64 oe_cleanup_start;
2112 u64 oe_cleanup_len = 0;
2113 /* The range that is untouched. */
2114 u64 untouched_start;
2115 u64 untouched_len = 0;
2116
2117 /*
2118 * Normally on a zoned device we're only doing COW writes, but
2119 * relocation on a zoned filesystem serializes I/O so that we're only
2120 * writing sequentially and can end up here as well.
2121 */
2122 ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
2123
2124 if (btrfs_is_shutdown(fs_info)) {
2125 ret = -EIO;
2126 goto error;
2127 }
2128 path = btrfs_alloc_path();
2129 if (!path) {
2130 ret = -ENOMEM;
2131 goto error;
2132 }
2133
2134 nocow_args.end = end;
2135 nocow_args.writeback_path = true;
2136
2137 while (cur_offset <= end) {
2138 struct btrfs_block_group *nocow_bg = NULL;
2139 struct btrfs_key found_key;
2140 struct btrfs_file_extent_item *fi;
2141 struct extent_buffer *leaf;
2142 struct extent_state *cached_state = NULL;
2143 u64 extent_end;
2144 int extent_type;
2145
2146 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2147 cur_offset, 0);
2148 if (ret < 0)
2149 goto error;
2150
2151 /*
2152 * If there is no extent for our range when doing the initial
2153 * search, then go back to the previous slot as it will be the
2154 * one containing the search offset
2155 */
2156 if (ret > 0 && path->slots[0] > 0 && check_prev) {
2157 leaf = path->nodes[0];
2158 btrfs_item_key_to_cpu(leaf, &found_key,
2159 path->slots[0] - 1);
2160 if (found_key.objectid == ino &&
2161 found_key.type == BTRFS_EXTENT_DATA_KEY)
2162 path->slots[0]--;
2163 }
2164 check_prev = false;
2165 next_slot:
2166 /* Go to next leaf if we have exhausted the current one */
2167 leaf = path->nodes[0];
2168 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2169 ret = btrfs_next_leaf(root, path);
2170 if (ret < 0)
2171 goto error;
2172 if (ret > 0)
2173 break;
2174 leaf = path->nodes[0];
2175 }
2176
2177 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2178
2179 /* Didn't find anything for our INO */
2180 if (found_key.objectid > ino)
2181 break;
2182 /*
2183 * Keep searching until we find an EXTENT_ITEM or there are no
2184 * more extents for this inode
2185 */
2186 if (WARN_ON_ONCE(found_key.objectid < ino) ||
2187 found_key.type < BTRFS_EXTENT_DATA_KEY) {
2188 path->slots[0]++;
2189 goto next_slot;
2190 }
2191
2192 /* Found key is not EXTENT_DATA_KEY or starts after req range */
2193 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2194 found_key.offset > end)
2195 break;
2196
2197 /*
2198 * If the found extent starts after requested offset, then
2199 * adjust cur_offset to be right before this extent begins.
2200 */
2201 if (found_key.offset > cur_offset) {
2202 if (cow_start == (u64)-1)
2203 cow_start = cur_offset;
2204 cur_offset = found_key.offset;
2205 goto next_slot;
2206 }
2207
2208 /*
2209 * Found an extent which begins before our range and potentially
2210 * intersects it
2211 */
2212 fi = btrfs_item_ptr(leaf, path->slots[0],
2213 struct btrfs_file_extent_item);
2214 extent_type = btrfs_file_extent_type(leaf, fi);
2215 /* If this is triggered then we have a memory corruption. */
2216 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2217 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2218 ret = -EUCLEAN;
2219 goto error;
2220 }
2221 extent_end = btrfs_file_extent_end(path);
2222
2223 /*
2224 * If the extent we got ends before our current offset, skip to
2225 * the next extent.
2226 */
2227 if (extent_end <= cur_offset) {
2228 path->slots[0]++;
2229 goto next_slot;
2230 }
2231
2232 nocow_args.start = cur_offset;
2233 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2234 if (ret < 0)
2235 goto error;
2236 if (ret == 0)
2237 goto must_cow;
2238
2239 ret = 0;
2240 nocow_bg = btrfs_inc_nocow_writers(fs_info,
2241 nocow_args.file_extent.disk_bytenr +
2242 nocow_args.file_extent.offset);
2243 if (!nocow_bg) {
2244 must_cow:
2245 /*
2246 * If we can't perform NOCOW writeback for the range,
2247 * then record the beginning of the range that needs to
2248 * be COWed. It will be written out before the next
2249 * NOCOW range if we find one, or when exiting this
2250 * loop.
2251 */
2252 if (cow_start == (u64)-1)
2253 cow_start = cur_offset;
2254 cur_offset = extent_end;
2255 if (cur_offset > end)
2256 break;
2257 if (!path->nodes[0])
2258 continue;
2259 path->slots[0]++;
2260 goto next_slot;
2261 }
2262
2263 /*
2264 * COW the range from cow_start to found_key.offset - 1. The key
2265 * contains the beginning of the first extent that can be
2266 * NOCOWed, following one which needs to be COWed
2267 */
2268 if (cow_start != (u64)-1) {
2269 ret = fallback_to_cow(inode, locked_folio, cow_start,
2270 found_key.offset - 1);
2271 if (ret) {
2272 cow_end = found_key.offset - 1;
2273 btrfs_dec_nocow_writers(nocow_bg);
2274 goto error;
2275 }
2276 cow_start = (u64)-1;
2277 }
2278
2279 ret = nocow_one_range(inode, locked_folio, &cached_state,
2280 &nocow_args, cur_offset,
2281 extent_type == BTRFS_FILE_EXTENT_PREALLOC);
2282 btrfs_dec_nocow_writers(nocow_bg);
2283 if (ret < 0) {
2284 nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
2285 goto error;
2286 }
2287 cur_offset = extent_end;
2288 }
2289 btrfs_release_path(path);
2290
2291 if (cur_offset <= end && cow_start == (u64)-1)
2292 cow_start = cur_offset;
2293
2294 if (cow_start != (u64)-1) {
2295 ret = fallback_to_cow(inode, locked_folio, cow_start, end);
2296 if (ret) {
2297 cow_end = end;
2298 goto error;
2299 }
2300 cow_start = (u64)-1;
2301 }
2302
2303 /*
2304 * Everything is finished without an error, so we can unlock the folios now.
2305 *
2306 * No need to touch the io tree range nor set folio ordered flag, as
2307 * fallback_to_cow() and nocow_one_range() have already handled them.
2308 */
2309 extent_clear_unlock_delalloc(inode, start, end, locked_folio, NULL, 0, PAGE_UNLOCK);
2310
2311 btrfs_free_path(path);
2312 return 0;
2313
2314 error:
2315 if (cow_start == (u64)-1) {
2316 /*
2317 * case a)
2318 * start cur_offset end
2319 * | OE cleanup | Untouched |
2320 *
2321 * We finished a fallback_to_cow() or nocow_one_range() call,
2322 * but failed to check the next range.
2323 *
2324 * or
2325 * start cur_offset nocow_end end
2326 * | OE cleanup | Skip | Untouched |
2327 *
2328 * nocow_one_range() failed, the range [cur_offset, nocow_end] is
2329 * already cleaned up.
2330 */
2331 oe_cleanup_start = start;
2332 oe_cleanup_len = cur_offset - start;
2333 if (nocow_end)
2334 untouched_start = nocow_end + 1;
2335 else
2336 untouched_start = cur_offset;
2337 untouched_len = end + 1 - untouched_start;
2338 } else if (cow_start != (u64)-1 && cow_end == 0) {
2339 /*
2340 * case b)
2341 * start cow_start cur_offset end
2342 * | OE cleanup | Untouched |
2343 *
2344 * We got a range that needs COW, but failed before we hit the next NOCOW
2345 * range, thus [cow_start, cur_offset) doesn't yet have any OE.
2346 */
2347 oe_cleanup_start = start;
2348 oe_cleanup_len = cow_start - start;
2349 untouched_start = cow_start;
2350 untouched_len = end + 1 - untouched_start;
2351 } else {
2352 /*
2353 * case c)
2354 * start cow_start cow_end end
2355 * | OE cleanup | Skip | Untouched |
2356 *
2357 * fallback_to_cow() failed, and it has already done the
2358 * cleanup for its range, so we shouldn't touch the range
2359 * [cow_start, cow_end].
2360 */
2361 ASSERT(cow_start != (u64)-1 && cow_end != 0);
2362 oe_cleanup_start = start;
2363 oe_cleanup_len = cow_start - start;
2364 untouched_start = cow_end + 1;
2365 untouched_len = end + 1 - untouched_start;
2366 }
2367
2368 if (oe_cleanup_len) {
2369 const u64 oe_cleanup_end = oe_cleanup_start + oe_cleanup_len - 1;
2370 btrfs_cleanup_ordered_extents(inode, oe_cleanup_start, oe_cleanup_len);
2371 extent_clear_unlock_delalloc(inode, oe_cleanup_start, oe_cleanup_end,
2372 locked_folio, NULL,
2373 EXTENT_LOCKED | EXTENT_DELALLOC,
2374 PAGE_UNLOCK | PAGE_START_WRITEBACK |
2375 PAGE_END_WRITEBACK);
2376 }
2377
2378 if (untouched_len) {
2379 struct extent_state *cached = NULL;
2380 const u64 untouched_end = untouched_start + untouched_len - 1;
2381
2382 /*
2383 * We need to lock the extent here because we're clearing DELALLOC and
2384 * we're not locked at this point.
2385 */
2386 btrfs_lock_extent(&inode->io_tree, untouched_start, untouched_end, &cached);
2387 extent_clear_unlock_delalloc(inode, untouched_start, untouched_end,
2388 locked_folio, &cached,
2389 EXTENT_LOCKED | EXTENT_DELALLOC |
2390 EXTENT_DEFRAG |
2391 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2392 PAGE_START_WRITEBACK |
2393 PAGE_END_WRITEBACK);
2394 btrfs_qgroup_free_data(inode, NULL, untouched_start, untouched_len, NULL);
2395 }
2396 btrfs_free_path(path);
2397 btrfs_err(fs_info,
2398 "%s failed, root=%llu inode=%llu start=%llu len=%llu cur_offset=%llu oe_cleanup=%llu oe_cleanup_len=%llu untouched_start=%llu untouched_len=%llu: %d",
2399 __func__, btrfs_root_id(inode->root), btrfs_ino(inode),
2400 start, end + 1 - start, cur_offset, oe_cleanup_start, oe_cleanup_len,
2401 untouched_start, untouched_len, ret);
2402 return ret;
2403 }
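/*
 * Illustrative userspace sketch (not from the kernel source, guarded by
 * #if 0): the error-path region selection above, cases a), b) and c),
 * reduced to plain arithmetic. The helper is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define U64_MAX ((uint64_t)-1)

static void pick_cleanup_ranges(uint64_t start, uint64_t end,
				uint64_t cur_offset, uint64_t cow_start,
				uint64_t cow_end, uint64_t nocow_end,
				uint64_t *oe_len, uint64_t *untouched_start,
				uint64_t *untouched_len)
{
	if (cow_start == U64_MAX) {
		/* Case a): everything up to cur_offset has ordered extents. */
		*oe_len = cur_offset - start;
		*untouched_start = nocow_end ? nocow_end + 1 : cur_offset;
	} else if (cow_end == 0) {
		/* Case b): [cow_start, end] has no ordered extents yet. */
		*oe_len = cow_start - start;
		*untouched_start = cow_start;
	} else {
		/* Case c): fallback_to_cow() cleaned [cow_start, cow_end]. */
		*oe_len = cow_start - start;
		*untouched_start = cow_end + 1;
	}
	*untouched_len = end + 1 - *untouched_start;
}

int main(void)
{
	uint64_t oe_len, u_start, u_len;

	/* Case b): COW range starts at 128K, failure before the NOCOW range. */
	pick_cleanup_ranges(0, (1 << 20) - 1, 256 << 10, 128 << 10, 0, 0,
			    &oe_len, &u_start, &u_len);
	printf("oe_len=%llu untouched=[%llu, +%llu]\n",
	       (unsigned long long)oe_len, (unsigned long long)u_start,
	       (unsigned long long)u_len);
	return 0;
}
#endif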
2404
2405 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2406 {
2407 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2408 if (inode->defrag_bytes &&
2409 btrfs_test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
2410 return false;
2411 return true;
2412 }
2413 return false;
2414 }
2415
2416 /*
2417 * Function to process delayed allocation (create CoW) for ranges which are
2418 * being touched for the first time.
2419 */
2420 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
2421 u64 start, u64 end, struct writeback_control *wbc)
2422 {
2423 const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2424
2425 /*
2426 * The range must cover part of the @locked_folio, or a return of 1
2427 * can confuse the caller.
2428 */
2429 ASSERT(!(end <= folio_pos(locked_folio) ||
2430 start >= folio_next_pos(locked_folio)));
2431
2432 if (should_nocow(inode, start, end))
2433 return run_delalloc_nocow(inode, locked_folio, start, end);
2434
2435 if (btrfs_inode_can_compress(inode) &&
2436 inode_need_compress(inode, start, end) &&
2437 run_delalloc_compressed(inode, locked_folio, start, end, wbc))
2438 return 1;
2439
2440 if (zoned)
2441 return run_delalloc_cow(inode, locked_folio, start, end, wbc, true);
2442 else
2443 return cow_file_range(inode, locked_folio, start, end, NULL, 0);
2444 }
2445
2446 void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2447 struct extent_state *orig, u64 split)
2448 {
2449 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2450 u64 size;
2451
2452 lockdep_assert_held(&inode->io_tree.lock);
2453
2454 /* not delalloc, ignore it */
2455 if (!(orig->state & EXTENT_DELALLOC))
2456 return;
2457
2458 size = orig->end - orig->start + 1;
2459 if (size > fs_info->max_extent_size) {
2460 u32 num_extents;
2461 u64 new_size;
2462
2463 /*
2464 * See the explanation in btrfs_merge_delalloc_extent, the same
2465 * applies here, just in reverse.
2466 */
2467 new_size = orig->end - split + 1;
2468 num_extents = count_max_extents(fs_info, new_size);
2469 new_size = split - orig->start;
2470 num_extents += count_max_extents(fs_info, new_size);
2471 if (count_max_extents(fs_info, size) >= num_extents)
2472 return;
2473 }
2474
2475 spin_lock(&inode->lock);
2476 btrfs_mod_outstanding_extents(inode, 1);
2477 spin_unlock(&inode->lock);
2478 }
2479
2480 /*
2481 * Handle merged delayed allocation extents so we can keep track of new extents
2482 * that are just merged onto old extents, such as when we are doing sequential
2483 * writes, so we can properly account for the metadata space we'll need.
2484 */
2485 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2486 struct extent_state *other)
2487 {
2488 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2489 u64 new_size, old_size;
2490 u32 num_extents;
2491
2492 lockdep_assert_held(&inode->io_tree.lock);
2493
2494 /* not delalloc, ignore it */
2495 if (!(other->state & EXTENT_DELALLOC))
2496 return;
2497
2498 if (new->start > other->start)
2499 new_size = new->end - other->start + 1;
2500 else
2501 new_size = other->end - new->start + 1;
2502
2503 /* we're not bigger than the max, unreserve the space and go */
2504 if (new_size <= fs_info->max_extent_size) {
2505 spin_lock(&inode->lock);
2506 btrfs_mod_outstanding_extents(inode, -1);
2507 spin_unlock(&inode->lock);
2508 return;
2509 }
2510
2511 /*
2512 * We have to add up either side to figure out how many extents were
2513 * accounted for before we merged into one big extent. If the number of
2514 * extents we accounted for is <= the amount we need for the new range
2515 * then we can return, otherwise drop. Think of it like this
2516 *
2517 * [ 4k][MAX_SIZE]
2518 *
2519 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2520 * need 2 outstanding extents, on one side we have 1 and the other side
2521 * we have 1 so they are == and we can return. But in this case
2522 *
2523 * [MAX_SIZE+4k][MAX_SIZE+4k]
2524 *
2525 * Each range on their own accounts for 2 extents, but merged together
2526 * they are only 3 extents worth of accounting, so we need to drop in
2527 * this case.
2528 */
2529 old_size = other->end - other->start + 1;
2530 num_extents = count_max_extents(fs_info, old_size);
2531 old_size = new->end - new->start + 1;
2532 num_extents += count_max_extents(fs_info, old_size);
2533 if (count_max_extents(fs_info, new_size) >= num_extents)
2534 return;
2535
2536 spin_lock(&inode->lock);
2537 btrfs_mod_outstanding_extents(inode, -1);
2538 spin_unlock(&inode->lock);
2539 }
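/*
 * Illustrative userspace sketch (not from the kernel source, guarded by
 * #if 0): the merge accounting above, with count_max_extents() modeled as
 * a round-up division over a 128MiB max extent size (the non-zoned
 * default). Reproduces the [MAX_SIZE+4k][MAX_SIZE+4k] example from the
 * comment.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)

static uint32_t count_max_extents(uint64_t size)
{
	return (size + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
}

int main(void)
{
	uint64_t left = MAX_EXTENT_SIZE + 4096, right = MAX_EXTENT_SIZE + 4096;
	uint32_t before = count_max_extents(left) + count_max_extents(right);
	uint32_t after = count_max_extents(left + right);

	/* before = 4, after = 3: one outstanding extent must be dropped. */
	printf("before=%u after=%u drop=%d\n", before, after, after < before);
	return 0;
}
#endif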
2540
2541 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
2542 {
2543 struct btrfs_root *root = inode->root;
2544 struct btrfs_fs_info *fs_info = root->fs_info;
2545
2546 spin_lock(&root->delalloc_lock);
2547 ASSERT(list_empty(&inode->delalloc_inodes));
2548 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2549 root->nr_delalloc_inodes++;
2550 if (root->nr_delalloc_inodes == 1) {
2551 spin_lock(&fs_info->delalloc_root_lock);
2552 ASSERT(list_empty(&root->delalloc_root));
2553 list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
2554 spin_unlock(&fs_info->delalloc_root_lock);
2555 }
2556 spin_unlock(&root->delalloc_lock);
2557 }
2558
2559 void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
2560 {
2561 struct btrfs_root *root = inode->root;
2562 struct btrfs_fs_info *fs_info = root->fs_info;
2563
2564 lockdep_assert_held(&root->delalloc_lock);
2565
2566 /*
2567 * We may be called after the inode was already deleted from the list,
2568 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
2569 * and then later through btrfs_clear_delalloc_extent() while the inode
2570 * still has ->delalloc_bytes > 0.
2571 */
2572 if (!list_empty(&inode->delalloc_inodes)) {
2573 list_del_init(&inode->delalloc_inodes);
2574 root->nr_delalloc_inodes--;
2575 if (!root->nr_delalloc_inodes) {
2576 ASSERT(list_empty(&root->delalloc_inodes));
2577 spin_lock(&fs_info->delalloc_root_lock);
2578 ASSERT(!list_empty(&root->delalloc_root));
2579 list_del_init(&root->delalloc_root);
2580 spin_unlock(&fs_info->delalloc_root_lock);
2581 }
2582 }
2583 }
2584
2585 /*
2586 * Properly track delayed allocation bytes in the inode and maintain the
2587 * list of inodes that have pending delalloc work to be done.
2588 */
2589 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2590 u32 bits)
2591 {
2592 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2593
2594 lockdep_assert_held(&inode->io_tree.lock);
2595
2596 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2597 WARN_ON(1);
2598 /*
2599 * set_bit and clear bit hooks normally require _irqsave/restore
2600 * but in this case, we are only testing for the DELALLOC
2601 * bit, which is only set or cleared with irqs on
2602 */
2603 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2604 u64 len = state->end + 1 - state->start;
2605 u64 prev_delalloc_bytes;
2606 u32 num_extents = count_max_extents(fs_info, len);
2607
2608 spin_lock(&inode->lock);
2609 btrfs_mod_outstanding_extents(inode, num_extents);
2610 spin_unlock(&inode->lock);
2611
2612 /* For sanity tests */
2613 if (btrfs_is_testing(fs_info))
2614 return;
2615
2616 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2617 fs_info->delalloc_batch);
2618 spin_lock(&inode->lock);
2619 prev_delalloc_bytes = inode->delalloc_bytes;
2620 inode->delalloc_bytes += len;
2621 if (bits & EXTENT_DEFRAG)
2622 inode->defrag_bytes += len;
2623 spin_unlock(&inode->lock);
2624
2625 /*
2626 * We don't need to be under the protection of the inode's lock,
2627 * because we are called while holding the inode's io_tree lock
2628 * and are therefore protected against concurrent calls of this
2629 * function and btrfs_clear_delalloc_extent().
2630 */
2631 if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
2632 btrfs_add_delalloc_inode(inode);
2633 }
2634
2635 if (!(state->state & EXTENT_DELALLOC_NEW) &&
2636 (bits & EXTENT_DELALLOC_NEW)) {
2637 spin_lock(&inode->lock);
2638 inode->new_delalloc_bytes += state->end + 1 - state->start;
2639 spin_unlock(&inode->lock);
2640 }
2641 }
2642
2643 /*
2644 * Once a range is no longer delalloc this function ensures that proper
2645 * accounting happens.
2646 */
2647 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2648 struct extent_state *state, u32 bits)
2649 {
2650 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2651 u64 len = state->end + 1 - state->start;
2652 u32 num_extents = count_max_extents(fs_info, len);
2653
2654 lockdep_assert_held(&inode->io_tree.lock);
2655
2656 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2657 spin_lock(&inode->lock);
2658 inode->defrag_bytes -= len;
2659 spin_unlock(&inode->lock);
2660 }
2661
2662 /*
2663 * set_bit and clear bit hooks normally require _irqsave/restore
2664 * but in this case, we are only testing for the DELALLOC
2665 * bit, which is only set or cleared with irqs on
2666 */
2667 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2668 struct btrfs_root *root = inode->root;
2669 u64 new_delalloc_bytes;
2670
2671 spin_lock(&inode->lock);
2672 btrfs_mod_outstanding_extents(inode, -num_extents);
2673 spin_unlock(&inode->lock);
2674
2675 /*
2676 * We don't reserve metadata space for space cache inodes so we
2677 * don't need to call delalloc_release_metadata if there is an
2678 * error.
2679 */
2680 if (bits & EXTENT_CLEAR_META_RESV &&
2681 root != fs_info->tree_root)
2682 btrfs_delalloc_release_metadata(inode, len, true);
2683
2684 /* For sanity tests. */
2685 if (btrfs_is_testing(fs_info))
2686 return;
2687
2688 if (!btrfs_is_data_reloc_root(root) &&
2689 !btrfs_is_free_space_inode(inode) &&
2690 !(state->state & EXTENT_NORESERVE) &&
2691 (bits & EXTENT_CLEAR_DATA_RESV))
2692 btrfs_free_reserved_data_space_noquota(inode, len);
2693
2694 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2695 fs_info->delalloc_batch);
2696 spin_lock(&inode->lock);
2697 inode->delalloc_bytes -= len;
2698 new_delalloc_bytes = inode->delalloc_bytes;
2699 spin_unlock(&inode->lock);
2700
2701 /*
2702 * We don't need to be under the protection of the inode's lock,
2703 * because we are called while holding the inode's io_tree lock
2704 * and are therefore protected against concurrent calls of this
2705 * function and btrfs_set_delalloc_extent().
2706 */
2707 if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
2708 spin_lock(&root->delalloc_lock);
2709 btrfs_del_delalloc_inode(inode);
2710 spin_unlock(&root->delalloc_lock);
2711 }
2712 }
2713
2714 if ((state->state & EXTENT_DELALLOC_NEW) &&
2715 (bits & EXTENT_DELALLOC_NEW)) {
2716 spin_lock(&inode->lock);
2717 ASSERT(inode->new_delalloc_bytes >= len);
2718 inode->new_delalloc_bytes -= len;
2719 if (bits & EXTENT_ADD_INODE_BYTES)
2720 inode_add_bytes(&inode->vfs_inode, len);
2721 spin_unlock(&inode->lock);
2722 }
2723 }
2724
2725 /*
2726 * Given a list of ordered sums, record them in the inode. This happens
2727 * at IO completion time based on sums calculated at bio submission time.
2728 */
2729 static int add_pending_csums(struct btrfs_trans_handle *trans,
2730 struct list_head *list)
2731 {
2732 struct btrfs_ordered_sum *sum;
2733 struct btrfs_root *csum_root = NULL;
2734 int ret;
2735
2736 list_for_each_entry(sum, list, list) {
2737 trans->adding_csums = true;
2738 if (!csum_root)
2739 csum_root = btrfs_csum_root(trans->fs_info,
2740 sum->logical);
2741 ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2742 trans->adding_csums = false;
2743 if (ret)
2744 return ret;
2745 }
2746 return 0;
2747 }
2748
2749 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2750 const u64 start,
2751 const u64 len,
2752 struct extent_state **cached_state)
2753 {
2754 u64 search_start = start;
2755 const u64 end = start + len - 1;
2756
2757 while (search_start < end) {
2758 const u64 search_len = end - search_start + 1;
2759 struct extent_map *em;
2760 u64 em_len;
2761 int ret = 0;
2762
2763 em = btrfs_get_extent(inode, NULL, search_start, search_len);
2764 if (IS_ERR(em))
2765 return PTR_ERR(em);
2766
2767 if (em->disk_bytenr != EXTENT_MAP_HOLE)
2768 goto next;
2769
2770 em_len = em->len;
2771 if (em->start < search_start)
2772 em_len -= search_start - em->start;
2773 if (em_len > search_len)
2774 em_len = search_len;
2775
2776 ret = btrfs_set_extent_bit(&inode->io_tree, search_start,
2777 search_start + em_len - 1,
2778 EXTENT_DELALLOC_NEW, cached_state);
2779 next:
2780 search_start = btrfs_extent_map_end(em);
2781 btrfs_free_extent_map(em);
2782 if (ret)
2783 return ret;
2784 }
2785 return 0;
2786 }
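/*
 * Illustrative userspace sketch (not from the kernel source, guarded by
 * #if 0): the clamping of a hole extent map against the search window
 * done above, as standalone arithmetic.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hole extent map [em_start, em_start + em_len). */
	uint64_t em_start = 4 << 10, em_len = 64 << 10;
	/* Search window [search_start, search_start + search_len). */
	uint64_t search_start = 16 << 10, search_len = 32 << 10;

	uint64_t len = em_len;
	if (em_start < search_start)
		len -= search_start - em_start;
	if (len > search_len)
		len = search_len;

	printf("mark [%llu, %llu] as new delalloc\n",
	       (unsigned long long)search_start,
	       (unsigned long long)(search_start + len - 1));
	return 0;
}
#endif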
2787
2788 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2789 unsigned int extra_bits,
2790 struct extent_state **cached_state)
2791 {
2792 WARN_ON(PAGE_ALIGNED(end));
2793
2794 if (start >= i_size_read(&inode->vfs_inode) &&
2795 !(inode->flags & BTRFS_INODE_PREALLOC)) {
2796 /*
2797 * There can't be any extents following eof in this case so just
2798 * set the delalloc new bit for the range directly.
2799 */
2800 extra_bits |= EXTENT_DELALLOC_NEW;
2801 } else {
2802 int ret;
2803
2804 ret = btrfs_find_new_delalloc_bytes(inode, start,
2805 end + 1 - start,
2806 cached_state);
2807 if (ret)
2808 return ret;
2809 }
2810
2811 return btrfs_set_extent_bit(&inode->io_tree, start, end,
2812 EXTENT_DELALLOC | extra_bits, cached_state);
2813 }
2814
2815 /* see btrfs_writepage_start_hook for details on why this is required */
2816 struct btrfs_writepage_fixup {
2817 struct folio *folio;
2818 struct btrfs_inode *inode;
2819 struct btrfs_work work;
2820 };
2821
2822 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2823 {
2824 struct btrfs_writepage_fixup *fixup =
2825 container_of(work, struct btrfs_writepage_fixup, work);
2826 struct btrfs_ordered_extent *ordered;
2827 struct extent_state *cached_state = NULL;
2828 struct extent_changeset *data_reserved = NULL;
2829 struct folio *folio = fixup->folio;
2830 struct btrfs_inode *inode = fixup->inode;
2831 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2832 u64 page_start = folio_pos(folio);
2833 u64 page_end = folio_next_pos(folio) - 1;
2834 int ret = 0;
2835 bool free_delalloc_space = true;
2836
2837 /*
2838 * This is similar to page_mkwrite, we need to reserve the space before
2839 * we take the folio lock.
2840 */
2841 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2842 folio_size(folio));
2843 again:
2844 folio_lock(folio);
2845
2846 /*
2847 * Before we queued this fixup, we took a reference on the folio.
2848 * folio->mapping may go NULL, but it shouldn't be moved to a different
2849 * address space.
2850 */
2851 if (!folio->mapping || !folio_test_dirty(folio) ||
2852 !folio_test_checked(folio)) {
2853 /*
2854 * Unfortunately this is a little tricky, either
2855 *
2856 * 1) We got here and our folio had already been dealt with and
2857 * we reserved our space, thus ret == 0, so we need to just
2858 * drop our space reservation and bail. This can happen the
2859 * first time we come into the fixup worker, or could happen
2860 * while waiting for the ordered extent.
2861 * 2) Our folio was already dealt with, but we happened to get an
2862 * ENOSPC above from the btrfs_delalloc_reserve_space. In
2863 * this case we obviously don't have anything to release, but
2864 * because the folio was already dealt with we don't want to
2865 * mark the folio with an error, so make sure we're resetting
2866 * ret to 0. This is why we have this check _before_ the ret
2867 * check, because we do not want to have a surprise ENOSPC
2868 * when the folio was already properly dealt with.
2869 */
2870 if (!ret) {
2871 btrfs_delalloc_release_extents(inode, folio_size(folio));
2872 btrfs_delalloc_release_space(inode, data_reserved,
2873 page_start, folio_size(folio),
2874 true);
2875 }
2876 ret = 0;
2877 goto out_page;
2878 }
2879
2880 /*
2881 * We can't mess with the folio state unless it is locked, so now that
2882 * it is locked bail if we failed to make our space reservation.
2883 */
2884 if (ret)
2885 goto out_page;
2886
2887 btrfs_lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2888
2889 /* already ordered? We're done */
2890 if (folio_test_ordered(folio))
2891 goto out_reserved;
2892
2893 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2894 if (ordered) {
2895 btrfs_unlock_extent(&inode->io_tree, page_start, page_end,
2896 &cached_state);
2897 folio_unlock(folio);
2898 btrfs_start_ordered_extent(ordered);
2899 btrfs_put_ordered_extent(ordered);
2900 goto again;
2901 }
2902
2903 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2904 &cached_state);
2905 if (ret)
2906 goto out_reserved;
2907
2908 /*
2909 * Everything went as planned, we're now the owner of a dirty page with
2910 * delayed allocation bits set and space reserved for our COW
2911 * destination.
2912 *
2913 * The page was dirty when we started, nothing should have cleaned it.
2914 */
2915 BUG_ON(!folio_test_dirty(folio));
2916 free_delalloc_space = false;
2917 out_reserved:
2918 btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2919 if (free_delalloc_space)
2920 btrfs_delalloc_release_space(inode, data_reserved, page_start,
2921 PAGE_SIZE, true);
2922 btrfs_unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2923 out_page:
2924 if (ret) {
2925 /*
2926 * We hit ENOSPC or other errors. Update the mapping and folio
2927 * to reflect the errors and clean the folio.
2928 */
2929 mapping_set_error(folio->mapping, ret);
2930 btrfs_mark_ordered_io_finished(inode, folio, page_start,
2931 folio_size(folio), !ret);
2932 folio_clear_dirty_for_io(folio);
2933 }
2934 btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
2935 folio_unlock(folio);
2936 folio_put(folio);
2937 kfree(fixup);
2938 extent_changeset_free(data_reserved);
2939 /*
2940 * As a precaution, do a delayed iput in case it would be the last iput
2941 * that could need flushing space. Recursing back to the fixup worker
2942 * would deadlock.
2943 */
2944 btrfs_add_delayed_iput(inode);
2945 }
2946
2947 /*
2948 * There are a few paths in the higher layers of the kernel that directly
2949 * set the folio dirty bit without asking the filesystem if it is a
2950 * good idea. This causes problems because we want to make sure COW
2951 * properly happens and the data=ordered rules are followed.
2952 *
2953 * In our case any range that doesn't have the ORDERED bit set
2954 * hasn't been properly setup for IO. We kick off an async process
2955 * to fix it up. The async helper will wait for ordered extents, set
2956 * the delalloc bit and make it safe to write the folio.
2957 */
2958 int btrfs_writepage_cow_fixup(struct folio *folio)
2959 {
2960 struct inode *inode = folio->mapping->host;
2961 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2962 struct btrfs_writepage_fixup *fixup;
2963
2964 /* This folio has ordered extent covering it already */
2965 if (folio_test_ordered(folio))
2966 return 0;
2967
2968 /*
2969 * For experimental builds, we error out instead of returning EAGAIN.
2970 *
2971 * We should not hit such out-of-band dirty folios anymore.
2972 */
2973 if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
2974 DEBUG_WARN();
2975 btrfs_err_rl(fs_info,
2976 "root %lld ino %llu folio %llu is marked dirty without notifying the fs",
2977 btrfs_root_id(BTRFS_I(inode)->root),
2978 btrfs_ino(BTRFS_I(inode)),
2979 folio_pos(folio));
2980 return -EUCLEAN;
2981 }
2982
2983 /*
2984 * folio_checked is set below when we create a fixup worker for this
2985 * folio, don't try to create another one if we're already
2986 * folio_test_checked.
2987 *
2988 * The extent_io writepage code will redirty the folio if we send back
2989 * EAGAIN.
2990 */
2991 if (folio_test_checked(folio))
2992 return -EAGAIN;
2993
2994 fixup = kzalloc_obj(*fixup, GFP_NOFS);
2995 if (!fixup)
2996 return -EAGAIN;
2997
2998 /*
2999 * We are already holding a reference to this inode from
3000 * write_cache_pages. We need to hold it because the space reservation
3001 * takes place outside of the folio lock, and we can't trust
3002 * folio->mapping outside of the folio lock.
3003 */
3004 ihold(inode);
3005 btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
3006 folio_get(folio);
3007 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
3008 fixup->folio = folio;
3009 fixup->inode = BTRFS_I(inode);
3010 btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
3011
3012 return -EAGAIN;
3013 }
3014
3015 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
3016 struct btrfs_inode *inode, u64 file_pos,
3017 struct btrfs_file_extent_item *stack_fi,
3018 const bool update_inode_bytes,
3019 u64 qgroup_reserved)
3020 {
3021 struct btrfs_root *root = inode->root;
3022 const u64 sectorsize = root->fs_info->sectorsize;
3023 BTRFS_PATH_AUTO_FREE(path);
3024 struct extent_buffer *leaf;
3025 struct btrfs_key ins;
3026 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
3027 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
3028 u64 offset = btrfs_stack_file_extent_offset(stack_fi);
3029 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
3030 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
3031 struct btrfs_drop_extents_args drop_args = { 0 };
3032 int ret;
3033
3034 path = btrfs_alloc_path();
3035 if (!path)
3036 return -ENOMEM;
3037
3038 /*
3039 * We may be replacing one extent in the tree with another.
3040 * The new extent is pinned in the extent map, and we don't want
3041 * to drop it from the cache until it is completely in the btree.
3042 *
3043 * So, tell btrfs_drop_extents to leave this extent in the cache.
3044 * The caller is expected to unpin it and allow it to be merged
3045 * with the others.
3046 */
3047 drop_args.path = path;
3048 drop_args.start = file_pos;
3049 drop_args.end = file_pos + num_bytes;
3050 drop_args.replace_extent = true;
3051 drop_args.extent_item_size = sizeof(*stack_fi);
3052 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
3053 if (ret)
3054 return ret;
3055
3056 if (!drop_args.extent_inserted) {
3057 ins.objectid = btrfs_ino(inode);
3058 ins.type = BTRFS_EXTENT_DATA_KEY;
3059 ins.offset = file_pos;
3060
3061 ret = btrfs_insert_empty_item(trans, root, path, &ins,
3062 sizeof(*stack_fi));
3063 if (ret)
3064 return ret;
3065 }
3066 leaf = path->nodes[0];
3067 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
3068 write_extent_buffer(leaf, stack_fi,
3069 btrfs_item_ptr_offset(leaf, path->slots[0]),
3070 sizeof(struct btrfs_file_extent_item));
3071
3072 btrfs_release_path(path);
3073
3074 /*
3075 * If we dropped an inline extent here, we know the range where it is
3076 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
3077 * number of bytes only for that range containing the inline extent.
3078 * The remainder of the range will be processed when clearing the
3079 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
3080 */
3081 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
3082 u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
3083
3084 inline_size = drop_args.bytes_found - inline_size;
3085 btrfs_update_inode_bytes(inode, sectorsize, inline_size);
3086 drop_args.bytes_found -= inline_size;
3087 num_bytes -= sectorsize;
3088 }
3089
3090 if (update_inode_bytes)
3091 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
3092
3093 ins.objectid = disk_bytenr;
3094 ins.type = BTRFS_EXTENT_ITEM_KEY;
3095 ins.offset = disk_num_bytes;
3096
3097 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
3098 if (ret)
3099 return ret;
3100
3101 return btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
3102 file_pos - offset,
3103 qgroup_reserved, &ins);
3104 }
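/*
 * Illustrative userspace sketch (not from the kernel source, guarded by
 * #if 0): the inline extent byte accounting above. When a dropped inline
 * extent is unaligned, one sector is accounted for the new extent and the
 * unaligned inline tail is dropped from the byte count. round_down() is
 * modeled directly for power-of-two alignments.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t bytes_found = 3000;	/* dropped inline extent, unaligned */

	uint64_t inline_size = bytes_found - round_down(bytes_found, sectorsize);

	printf("add %llu bytes, drop inline tail of %llu bytes\n",
	       (unsigned long long)sectorsize,
	       (unsigned long long)inline_size);
	return 0;
}
#endif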
3105
3106 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3107 u64 start, u64 len)
3108 {
3109 struct btrfs_block_group *cache;
3110
3111 cache = btrfs_lookup_block_group(fs_info, start);
3112 ASSERT(cache);
3113
3114 spin_lock(&cache->lock);
3115 cache->delalloc_bytes -= len;
3116 spin_unlock(&cache->lock);
3117
3118 btrfs_put_block_group(cache);
3119 }
3120
3121 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3122 struct btrfs_ordered_extent *oe)
3123 {
3124 struct btrfs_file_extent_item stack_fi;
3125 bool update_inode_bytes;
3126 u64 num_bytes = oe->num_bytes;
3127 u64 ram_bytes = oe->ram_bytes;
3128
3129 memset(&stack_fi, 0, sizeof(stack_fi));
3130 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3131 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3132 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3133 oe->disk_num_bytes);
3134 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3135 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
3136 num_bytes = oe->truncated_len;
3137 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3138 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3139 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3140 /* Encryption and other encoding is reserved and all 0 */
3141
3142 /*
3143 * For delalloc, when completing an ordered extent we update the inode's
3144 * bytes when clearing the range in the inode's io tree, so pass false
3145 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3146 * except if the ordered extent was truncated.
3147 */
3148 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3149 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3150 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3151
3152 return insert_reserved_file_extent(trans, oe->inode,
3153 oe->file_offset, &stack_fi,
3154 update_inode_bytes, oe->qgroup_rsv);
3155 }
3156
3157 /*
3158 * As ordered data IO finishes, this gets called so we can finish
3159 * an ordered extent if the range of bytes in the file it covers are
3160 * fully written.
3161 */
3162 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3163 {
3164 struct btrfs_inode *inode = ordered_extent->inode;
3165 struct btrfs_root *root = inode->root;
3166 struct btrfs_fs_info *fs_info = root->fs_info;
3167 struct btrfs_trans_handle *trans = NULL;
3168 struct extent_io_tree *io_tree = &inode->io_tree;
3169 struct extent_state *cached_state = NULL;
3170 u64 start, end;
3171 int compress_type = 0;
3172 int ret = 0;
3173 u64 logical_len = ordered_extent->num_bytes;
3174 bool freespace_inode;
3175 bool truncated = false;
3176 bool clear_reserved_extent = true;
3177 unsigned int clear_bits = EXTENT_DEFRAG;
3178
3179 start = ordered_extent->file_offset;
3180 end = start + ordered_extent->num_bytes - 1;
3181
3182 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3183 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3184 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3185 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3186 clear_bits |= EXTENT_DELALLOC_NEW;
3187
3188 freespace_inode = btrfs_is_free_space_inode(inode);
3189 if (!freespace_inode)
3190 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3191
3192 if (unlikely(test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags))) {
3193 ret = -EIO;
3194 goto out;
3195 }
3196
3197 ret = btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3198 ordered_extent->disk_num_bytes);
3199 if (ret)
3200 goto out;
3201
3202 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3203 truncated = true;
3204 logical_len = ordered_extent->truncated_len;
3205 /* Truncated the entire extent, don't bother adding */
3206 if (!logical_len)
3207 goto out;
3208 }
3209
3210 /*
3211 * If it's a COW write we need to lock the extent range as we will be
3212 * inserting/replacing file extent items and unpinning an extent map.
3213 * This must be taken before joining a transaction, as it's a higher
3214 * level lock (like the inode's VFS lock), otherwise we can run into an
3215 * ABBA deadlock with other tasks (transactions work like a lock,
3216 * depending on their current state).
3217 */
3218 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3219 clear_bits |= EXTENT_LOCKED | EXTENT_FINISHING_ORDERED;
3220 btrfs_lock_extent_bits(io_tree, start, end,
3221 EXTENT_LOCKED | EXTENT_FINISHING_ORDERED,
3222 &cached_state);
3223 }
3224
3225 if (freespace_inode)
3226 trans = btrfs_join_transaction_spacecache(root);
3227 else
3228 trans = btrfs_join_transaction(root);
3229 if (IS_ERR(trans)) {
3230 ret = PTR_ERR(trans);
3231 trans = NULL;
3232 goto out;
3233 }
3234
3235 trans->block_rsv = &inode->block_rsv;
3236
3237 ret = btrfs_insert_raid_extent(trans, ordered_extent);
3238 if (unlikely(ret)) {
3239 btrfs_abort_transaction(trans, ret);
3240 goto out;
3241 }
3242
3243 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3244 /* Logic error */
3245 ASSERT(list_empty(&ordered_extent->list));
3246 if (unlikely(!list_empty(&ordered_extent->list))) {
3247 ret = -EINVAL;
3248 btrfs_abort_transaction(trans, ret);
3249 goto out;
3250 }
3251
3252 btrfs_inode_safe_disk_i_size_write(inode, 0);
3253 ret = btrfs_update_inode_fallback(trans, inode);
3254 if (unlikely(ret)) {
3255 /* -ENOMEM or corruption */
3256 btrfs_abort_transaction(trans, ret);
3257 }
3258 goto out;
3259 }
3260
3261 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3262 compress_type = ordered_extent->compress_type;
3263 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3264 BUG_ON(compress_type);
3265 ret = btrfs_mark_extent_written(trans, inode,
3266 ordered_extent->file_offset,
3267 ordered_extent->file_offset +
3268 logical_len);
3269 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3270 ordered_extent->disk_num_bytes);
3271 if (unlikely(ret < 0)) {
3272 btrfs_abort_transaction(trans, ret);
3273 goto out;
3274 }
3275 } else {
3276 BUG_ON(root == fs_info->tree_root);
3277 ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3278 if (unlikely(ret < 0)) {
3279 btrfs_abort_transaction(trans, ret);
3280 goto out;
3281 }
3282 clear_reserved_extent = false;
3283 btrfs_release_delalloc_bytes(fs_info,
3284 ordered_extent->disk_bytenr,
3285 ordered_extent->disk_num_bytes);
3286 }
3287
3288 ret = btrfs_unpin_extent_cache(inode, ordered_extent->file_offset,
3289 ordered_extent->num_bytes, trans->transid);
3290 if (unlikely(ret < 0)) {
3291 btrfs_abort_transaction(trans, ret);
3292 goto out;
3293 }
3294
3295 ret = add_pending_csums(trans, &ordered_extent->list);
3296 if (unlikely(ret)) {
3297 btrfs_abort_transaction(trans, ret);
3298 goto out;
3299 }
3300
3301 /*
3302 * If this is a new delalloc range, clear its new delalloc flag to
3303 * update the inode's number of bytes. This needs to be done before
3304 * updating the inode item.
3305 */
3306 if ((clear_bits & EXTENT_DELALLOC_NEW) &&
3307 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
3308 btrfs_clear_extent_bit(&inode->io_tree, start, end,
3309 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
3310 &cached_state);
3311
3312 btrfs_inode_safe_disk_i_size_write(inode, 0);
3313 ret = btrfs_update_inode_fallback(trans, inode);
3314 if (unlikely(ret)) { /* -ENOMEM or corruption */
3315 btrfs_abort_transaction(trans, ret);
3316 goto out;
3317 }
3318 out:
3319 btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3320 &cached_state);
3321
3322 if (trans)
3323 btrfs_end_transaction(trans);
3324
3325 if (ret || truncated) {
3326 /*
3327 * If we failed to finish this ordered extent for any reason we
3328 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
3329 * extent, and mark the inode with the error if it wasn't
3330 * already set. Any error during writeback would have already
3331 * set the mapping error, so we need to set it if we're the ones
3332 * marking this ordered extent as failed.
3333 */
3334 if (ret)
3335 btrfs_mark_ordered_extent_error(ordered_extent);
3336
3337 /*
3338 * Drop extent maps for the part of the extent we didn't write.
3339 *
3340 * We have an exception here for the free_space_inode, this is
3341 * because when we do btrfs_get_extent() on the free space inode
3342 * we will search the commit root. If this is a new block group
3343 * we won't find anything, and we will trip over the assert in
3344 * writepage where we do ASSERT(em->block_start !=
3345 * EXTENT_MAP_HOLE).
3346 *
3347 * Theoretically we could also skip this for any NOCOW extent as
3348 * we don't mess with the extent map tree in the NOCOW case, but
3349 * for now simply skip this if we are the free space inode.
3350 */
3351 if (!btrfs_is_free_space_inode(inode)) {
3352 u64 unwritten_start = start;
3353
3354 if (truncated)
3355 unwritten_start += logical_len;
3356
3357 btrfs_drop_extent_map_range(inode, unwritten_start,
3358 end, false);
3359 }
3360
3361 /*
3362 * If the ordered extent had an IOERR or something else went
3363 * wrong we need to return the space for this ordered extent
3364 * back to the allocator. We only free the extent in the
3365 * truncated case if we didn't write out the extent at all.
3366 *
3367 * If we made it past insert_reserved_file_extent before we
3368 * errored out then we don't need to do this as the accounting
3369 * has already been done.
3370 */
3371 if ((ret || !logical_len) &&
3372 clear_reserved_extent &&
3373 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3374 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3375 /*
3376 * Discard the range before returning it back to the
3377 * free space pool
3378 */
3379 if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
3380 btrfs_discard_extent(fs_info,
3381 ordered_extent->disk_bytenr,
3382 ordered_extent->disk_num_bytes,
3383 NULL, true);
3384 btrfs_free_reserved_extent(fs_info,
3385 ordered_extent->disk_bytenr,
3386 ordered_extent->disk_num_bytes, true);
3387 /*
3388 * Actually free the qgroup rsv which was released when
3389 * the ordered extent was created.
3390 */
3391 btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
3392 ordered_extent->qgroup_rsv,
3393 BTRFS_QGROUP_RSV_DATA);
3394 }
3395 }
3396
3397 /*
3398 * This needs to be done to make sure anybody waiting knows we are done
3399 * updating everything for this ordered extent.
3400 */
3401 btrfs_remove_ordered_extent(inode, ordered_extent);
3402
3403 /* once for us */
3404 btrfs_put_ordered_extent(ordered_extent);
3405 /* once for the tree */
3406 btrfs_put_ordered_extent(ordered_extent);
3407
3408 return ret;
3409 }
3410
3411 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
3412 {
3413 if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
3414 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3415 list_empty(&ordered->bioc_list))
3416 btrfs_finish_ordered_zoned(ordered);
3417 return btrfs_finish_one_ordered(ordered);
3418 }
3419
3420 /*
3421 * Calculate the checksum of an fs block at physical memory address @paddr,
3422 * and save the result to @dest.
3423 *
3424 * The folio containing @paddr must be large enough to contain a full fs block.
3425 */
3426 void btrfs_calculate_block_csum_folio(struct btrfs_fs_info *fs_info,
3427 const phys_addr_t paddr, u8 *dest)
3428 {
3429 struct folio *folio = page_folio(phys_to_page(paddr));
3430 const u32 blocksize = fs_info->sectorsize;
3431 const u32 step = min(blocksize, PAGE_SIZE);
3432 const u32 nr_steps = blocksize / step;
3433 phys_addr_t paddrs[BTRFS_MAX_BLOCKSIZE / PAGE_SIZE];
3434
3435 /* The full block must be inside the folio. */
3436 ASSERT(offset_in_folio(folio, paddr) + blocksize <= folio_size(folio));
3437
3438 for (int i = 0; i < nr_steps; i++) {
3439 u32 pindex = offset_in_folio(folio, paddr + i * step) >> PAGE_SHIFT;
3440
3441 /*
3442 * For bs <= ps cases, we will only run the loop once, so the offset
3443 * inside the page will only be added to paddrs[0].
3444 *
3445 * For bs > ps cases, the block must be page aligned, thus the offset
3446 * inside the page will always be 0.
3447 */
3448 paddrs[i] = page_to_phys(folio_page(folio, pindex)) + offset_in_page(paddr);
3449 }
3450 return btrfs_calculate_block_csum_pages(fs_info, paddrs, dest);
3451 }
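/*
 * Worked example for the stepping above (a sketch under assumed geometry,
 * not taken from a real call site): with a 16K block size on 4K pages,
 * step = 4K and nr_steps = 4, so the loop records four physical addresses,
 * one per page of the large folio, before handing them off:
 *
 *	u8 csum[BTRFS_CSUM_SIZE];
 *
 *	btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
 *
 * With a 4K block on 4K pages, step = blocksize and nr_steps = 1, so only
 * paddrs[0] is used, carrying the offset of the block inside its page.
 */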
3452
3453 /*
3454 * Calculate the checksum of a fs block backed by multiple noncontiguous pages
3455 * at @paddrs[] and save the result to @dest.
3456 *
3457 * Each entry of @paddrs[] must have room for min(blocksize, PAGE_SIZE) bytes inside its page.
3458 */
3459 void btrfs_calculate_block_csum_pages(struct btrfs_fs_info *fs_info,
3460 const phys_addr_t paddrs[], u8 *dest)
3461 {
3462 const u32 blocksize = fs_info->sectorsize;
3463 const u32 step = min(blocksize, PAGE_SIZE);
3464 const u32 nr_steps = blocksize / step;
3465 struct btrfs_csum_ctx csum;
3466
3467 btrfs_csum_init(&csum, fs_info->csum_type);
3468 for (int i = 0; i < nr_steps; i++) {
3469 const phys_addr_t paddr = paddrs[i];
3470 void *kaddr;
3471
3472 ASSERT(offset_in_page(paddr) + step <= PAGE_SIZE);
3473 kaddr = kmap_local_page(phys_to_page(paddr)) + offset_in_page(paddr);
3474 btrfs_csum_update(&csum, kaddr, step);
3475 kunmap_local(kaddr);
3476 }
3477 btrfs_csum_final(&csum, dest);
3478 }
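/*
 * A minimal caller-side sketch (the page variables are assumptions, not
 * from a real call site): checksum an 8K block scattered across two
 * discontiguous 4K pages:
 *
 *	phys_addr_t paddrs[2];
 *	u8 csum[BTRFS_CSUM_SIZE];
 *
 *	paddrs[0] = page_to_phys(first_page);
 *	paddrs[1] = page_to_phys(second_page);
 *	btrfs_calculate_block_csum_pages(fs_info, paddrs, csum);
 */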
3479
3480 /*
3481 * Verify the checksum for a single sector without any extra actions that
3482 * depend on the type of I/O.
3483 *
3484 * The full block starting at @paddr must be contained within one folio.
3485 */
3486 int btrfs_check_block_csum(struct btrfs_fs_info *fs_info, phys_addr_t paddr, u8 *csum,
3487 const u8 * const csum_expected)
3488 {
3489 btrfs_calculate_block_csum_folio(fs_info, paddr, csum);
3490 if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
3491 return -EIO;
3492 return 0;
3493 }
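/*
 * A minimal sketch of a caller (variable names are assumptions): verify a
 * block against an expected csum and propagate -EIO on mismatch:
 *
 *	u8 csum[BTRFS_CSUM_SIZE];
 *	int ret;
 *
 *	ret = btrfs_check_block_csum(fs_info, paddr, csum, csum_expected);
 *	if (ret)
 *		return ret;
 */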
3494
3495 /*
3496 * Verify the checksum of a single data sector, which can be scattered at
3497 * different noncontiguous pages.
3498 *
3499 * @bbio: btrfs_io_bio which contains the csum
3500 * @dev: device the sector is on
3501 * @bio_offset: offset to the beginning of the bio (in bytes)
3502 * @paddrs: physical addresses which back the fs block
3503 *
3504 * Check if the checksum on a data block is valid. When a checksum mismatch is
3505 * detected, report the error and fill the corrupted range with zero.
3506 *
3507 * Return %true if the sector is ok or had no checksum to start with, else %false.
3508 */
3509 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
3510 u32 bio_offset, const phys_addr_t paddrs[])
3511 {
3512 struct btrfs_inode *inode = bbio->inode;
3513 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3514 const u32 blocksize = fs_info->sectorsize;
3515 const u32 step = min(blocksize, PAGE_SIZE);
3516 const u32 nr_steps = blocksize / step;
3517 u64 file_offset = bbio->file_offset + bio_offset;
3518 u64 end = file_offset + blocksize - 1;
3519 u8 *csum_expected;
3520 u8 csum[BTRFS_CSUM_SIZE];
3521
3522 if (!bbio->csum)
3523 return true;
3524
3525 if (btrfs_is_data_reloc_root(inode->root) &&
3526 btrfs_test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3527 NULL)) {
3528 /* Skip the range without csum for data reloc inode */
3529 btrfs_clear_extent_bit(&inode->io_tree, file_offset, end,
3530 EXTENT_NODATASUM, NULL);
3531 return true;
3532 }
3533
3534 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
3535 fs_info->csum_size;
3536 btrfs_calculate_block_csum_pages(fs_info, paddrs, csum);
3537 if (unlikely(memcmp(csum, csum_expected, fs_info->csum_size) != 0))
3538 goto zeroit;
3539 return true;
3540
3541 zeroit:
3542 btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
3543 bbio->mirror_num);
3544 if (dev)
3545 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
3546 for (int i = 0; i < nr_steps; i++)
3547 memzero_page(phys_to_page(paddrs[i]), offset_in_page(paddrs[i]), step);
3548 return false;
3549 }
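/*
 * Worked example for the csum_expected indexing above, assuming the default
 * crc32c checksum (csum_size == 4) and 4K sectors (sectorsize_bits == 12):
 * for bio_offset == 8192 the block is the third sector of the bio, so
 *
 *	csum_expected = bbio->csum + (8192 >> 12) * 4 = bbio->csum + 8
 */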
3550
3551 /*
3552 * Perform a delayed iput on @inode.
3553 *
3554 * @inode: The inode we want to perform iput on
3555 *
3556 * This function uses the generic vfs_inode::i_count to track whether we should
3557 * just decrement it (in case it's > 1) or if this is the last iput then link
3558 * the inode to the delayed iput machinery. Delayed iputs are processed at
3559 * transaction commit time/superblock commit/cleaner kthread.
3560 */
3561 void btrfs_add_delayed_iput(struct btrfs_inode *inode)
3562 {
3563 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3564 unsigned long flags;
3565
3566 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
3567 return;
3568
3569 WARN_ON_ONCE(test_bit(BTRFS_FS_STATE_NO_DELAYED_IPUT, &fs_info->fs_state));
3570 atomic_inc(&fs_info->nr_delayed_iputs);
3571 /*
3572 * Need to be irq safe here because we can be called from either an irq
3573 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
3574 * context.
3575 */
3576 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
3577 ASSERT(list_empty(&inode->delayed_iput));
3578 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
3579 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
3580 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3581 wake_up_process(fs_info->cleaner_kthread);
3582 }
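/*
 * The usual pattern, sketched (not a specific call site): code that may run
 * in irq context or under locks must not risk the final iput() and the
 * eviction it can trigger, so instead of calling iput() directly it does:
 *
 *	btrfs_add_delayed_iput(inode);
 *
 * and the cleaner kthread (or a transaction/superblock commit) later drains
 * the list via btrfs_run_delayed_iputs().
 */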
3583
3584 static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
3585 struct btrfs_inode *inode)
3586 {
3587 list_del_init(&inode->delayed_iput);
3588 spin_unlock_irq(&fs_info->delayed_iput_lock);
3589 iput(&inode->vfs_inode);
3590 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
3591 wake_up(&fs_info->delayed_iputs_wait);
3592 spin_lock_irq(&fs_info->delayed_iput_lock);
3593 }
3594
3595 static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
3596 struct btrfs_inode *inode)
3597 {
3598 if (!list_empty(&inode->delayed_iput)) {
3599 spin_lock_irq(&fs_info->delayed_iput_lock);
3600 if (!list_empty(&inode->delayed_iput))
3601 run_delayed_iput_locked(fs_info, inode);
3602 spin_unlock_irq(&fs_info->delayed_iput_lock);
3603 }
3604 }
3605
3606 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
3607 {
3608 /*
3609 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
3610 * calls btrfs_add_delayed_iput() and that needs to lock
3611 * fs_info->delayed_iput_lock. So we need to disable irqs here to
3612 * prevent a deadlock.
3613 */
3614 spin_lock_irq(&fs_info->delayed_iput_lock);
3615 while (!list_empty(&fs_info->delayed_iputs)) {
3616 struct btrfs_inode *inode;
3617
3618 inode = list_first_entry(&fs_info->delayed_iputs,
3619 struct btrfs_inode, delayed_iput);
3620 run_delayed_iput_locked(fs_info, inode);
3621 if (need_resched()) {
3622 spin_unlock_irq(&fs_info->delayed_iput_lock);
3623 cond_resched();
3624 spin_lock_irq(&fs_info->delayed_iput_lock);
3625 }
3626 }
3627 spin_unlock_irq(&fs_info->delayed_iput_lock);
3628 }
3629
3630 /*
3631 * Wait for all delayed iputs to complete
3632 *
3633 * @fs_info: the filesystem
3634 *
3635 * This will wait on any delayed iputs that are currently running with KILLABLE
3636 * set. Once they are all done running we will return, unless we are killed in
3637 * which case we return EINTR. This helps in user operations like fallocate etc
3638 * that might get blocked on the iputs.
3639 *
3640 * Return -EINTR if we were killed, 0 if nothing's pending
3641 */
3642 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
3643 {
3644 int ret = wait_event_killable(fs_info->delayed_iputs_wait,
3645 atomic_read(&fs_info->nr_delayed_iputs) == 0);
3646 if (ret)
3647 return -EINTR;
3648 return 0;
3649 }
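/*
 * A sketch of the intended use in flushing paths (the surrounding logic is
 * assumed): run the pending iputs so they release their space, then wait
 * for any still in flight, bailing out if the task was killed:
 *
 *	btrfs_run_delayed_iputs(fs_info);
 *	ret = btrfs_wait_on_delayed_iputs(fs_info);
 *	if (ret == -EINTR)
 *		return ret;
 */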
3650
3651 /*
3652 * This creates an orphan entry for the given inode in case something goes wrong
3653 * in the middle of an unlink.
3654 */
3655 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
3656 struct btrfs_inode *inode)
3657 {
3658 int ret;
3659
3660 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
3661 if (unlikely(ret && ret != -EEXIST)) {
3662 btrfs_abort_transaction(trans, ret);
3663 return ret;
3664 }
3665
3666 return 0;
3667 }
3668
3669 /*
3670 * We have done the delete so we can go ahead and remove the orphan item for
3671 * this particular inode.
3672 */
3673 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3674 struct btrfs_inode *inode)
3675 {
3676 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
3677 }
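/*
 * These two bracket operations that may leave an inode unreachable. A
 * condensed sketch of the add side, as done by btrfs_unlink() below once
 * the last link is gone:
 *
 *	if (inode->i_nlink == 0)
 *		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
 *
 * Eviction later deletes the inode's items and removes the orphan item.
 */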
3678
3679 /*
3680 * This cleans up any orphans that may be left over from the last use of
3681 * this root.
3682 */
3683 int btrfs_orphan_cleanup(struct btrfs_root *root)
3684 {
3685 struct btrfs_fs_info *fs_info = root->fs_info;
3686 BTRFS_PATH_AUTO_FREE(path);
3687 struct extent_buffer *leaf;
3688 struct btrfs_key key, found_key;
3689 struct btrfs_trans_handle *trans;
3690 u64 last_objectid = 0;
3691 int ret = 0, nr_unlink = 0;
3692
3693 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
3694 return 0;
3695
3696 path = btrfs_alloc_path();
3697 if (!path) {
3698 ret = -ENOMEM;
3699 goto out;
3700 }
3701 path->reada = READA_BACK;
3702
3703 key.objectid = BTRFS_ORPHAN_OBJECTID;
3704 key.type = BTRFS_ORPHAN_ITEM_KEY;
3705 key.offset = (u64)-1;
3706
3707 while (1) {
3708 struct btrfs_inode *inode;
3709
3710 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3711 if (ret < 0)
3712 goto out;
3713
3714 /*
3715 * ret == 0 means we found what we were searching for, which is
3716 * weird, but possible. So only adjust the path if we didn't
3717 * find the key, and check whether the previous item matches.
3718 */
3719 if (ret > 0) {
3720 ret = 0;
3721 if (path->slots[0] == 0)
3722 break;
3723 path->slots[0]--;
3724 }
3725
3726 /* pull out the item */
3727 leaf = path->nodes[0];
3728 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3729
3730 /* make sure the item matches what we want */
3731 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3732 break;
3733 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3734 break;
3735
3736 /* release the path since we're done with it */
3737 btrfs_release_path(path);
3738
3739 /*
3740 * This is basically what btrfs_lookup() does, minus the root
3741 * crossing. We store the inode number in the offset field of
3742 * the orphan item.
3743 */
3744
3745 if (found_key.offset == last_objectid) {
3746 /*
3747 * We found the same inode as before. This means we were
3748 * not able to remove its items via eviction triggered
3749 * by an iput(). A transaction abort may have happened,
3750 * due to -ENOSPC for example, so try to grab the error
3751 * that led to a transaction abort, if any.
3752 */
3753 btrfs_err(fs_info,
3754 "Error removing orphan entry, stopping orphan cleanup");
3755 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
3756 goto out;
3757 }
3758
3759 last_objectid = found_key.offset;
3760
3761 found_key.objectid = found_key.offset;
3762 found_key.type = BTRFS_INODE_ITEM_KEY;
3763 found_key.offset = 0;
3764 inode = btrfs_iget(last_objectid, root);
3765 if (IS_ERR(inode)) {
3766 ret = PTR_ERR(inode);
3767 inode = NULL;
3768 if (ret != -ENOENT)
3769 goto out;
3770 }
3771
3772 if (!inode && root == fs_info->tree_root) {
3773 struct btrfs_root *dead_root;
3774 int is_dead_root = 0;
3775
3776 /*
3777 * This is an orphan in the tree root. Currently these
3778 * could come from 2 sources:
3779 * a) a root (snapshot/subvolume) deletion in progress
3780 * b) a free space cache inode
3781 * We need to distinguish those two, as the orphan item
3782 * for a root must not get deleted before the deletion
3783 * of the snapshot/subvolume's tree completes.
3784 *
3785 * btrfs_find_orphan_roots() ran before us, which has
3786 * found all deleted roots and loaded them into
3787 * fs_info->fs_roots_radix. So here we can find if an
3788 * orphan item corresponds to a deleted root by looking
3789 * up the root from that radix tree.
3790 */
3791
3792 spin_lock(&fs_info->fs_roots_radix_lock);
3793 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
3794 (unsigned long)found_key.objectid);
3795 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
3796 is_dead_root = 1;
3797 spin_unlock(&fs_info->fs_roots_radix_lock);
3798
3799 if (is_dead_root) {
3800 /* prevent this orphan from being found again */
3801 key.offset = found_key.objectid - 1;
3802 continue;
3803 }
3804
3805 }
3806
3807 /*
3808 * If we have an inode with links, there are a couple of
3809 * possibilities:
3810 *
3811 * 1. We were halfway through creating fsverity metadata for the
3812 * file. In that case, the orphan item represents incomplete
3813 * fsverity metadata which must be cleaned up with
3814 * btrfs_drop_verity_items and deleting the orphan item.
3815 *
3816 * 2. Old kernels (before v3.12) used to create an
3817 * orphan item for truncate indicating that there were possibly
3818 * extent items past i_size that needed to be deleted. In v3.12,
3819 * truncate was changed to update i_size in sync with the extent
3820 * items, but the (useless) orphan item was still created. Since
3821 * v4.18, we don't create the orphan item for truncate at all.
3822 *
3823 * So, this item could mean that we need to do a truncate, but
3824 * only if this filesystem was last used on a pre-v3.12 kernel
3825 * and was not cleanly unmounted. The odds of that are quite
3826 * slim, and it's a pain to do the truncate now, so just delete
3827 * the orphan item.
3828 *
3829 * It's also possible that this orphan item was supposed to be
3830 * deleted but wasn't. The inode number may have been reused,
3831 * but either way, we can delete the orphan item.
3832 */
3833 if (!inode || inode->vfs_inode.i_nlink) {
3834 if (inode) {
3835 ret = btrfs_drop_verity_items(inode);
3836 iput(&inode->vfs_inode);
3837 inode = NULL;
3838 if (ret)
3839 goto out;
3840 }
3841 trans = btrfs_start_transaction(root, 1);
3842 if (IS_ERR(trans)) {
3843 ret = PTR_ERR(trans);
3844 goto out;
3845 }
3846 btrfs_debug(fs_info, "auto deleting %Lu",
3847 found_key.objectid);
3848 ret = btrfs_del_orphan_item(trans, root,
3849 found_key.objectid);
3850 btrfs_end_transaction(trans);
3851 if (ret)
3852 goto out;
3853 continue;
3854 }
3855
3856 nr_unlink++;
3857
3858 /* this will do delete_inode and everything for us */
3859 iput(&inode->vfs_inode);
3860 }
3861 /* release the path since we're done with it */
3862 btrfs_release_path(path);
3863
3864 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3865 trans = btrfs_join_transaction(root);
3866 if (!IS_ERR(trans))
3867 btrfs_end_transaction(trans);
3868 }
3869
3870 if (nr_unlink)
3871 btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
3872
3873 out:
3874 if (ret)
3875 btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
3876 return ret;
3877 }
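/*
 * Orphan items all share one objectid and store the inode number in the key
 * offset, which is why the loop above searches from offset (u64)-1 and
 * walks backwards. For example (inode numbers are illustrative), orphans
 * for inodes 257 and 260 are keyed as:
 *
 *	(BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, 257)
 *	(BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, 260)
 *
 * and the initial search lands just past the last of them.
 */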
3878
3879 /*
3880 * Look ahead in the leaf for xattrs. If we don't find any then we know there
3881 * can't be any ACLs.
3882 *
3883 * @leaf: the eb leaf where to search
3884 * @slot: the slot the inode is in
3885 * @objectid: the objectid of the inode
3886 *
3887 * Return true if there is xattr/ACL, false otherwise.
3888 */
3889 static noinline bool acls_after_inode_item(struct extent_buffer *leaf,
3890 int slot, u64 objectid,
3891 int *first_xattr_slot)
3892 {
3893 u32 nritems = btrfs_header_nritems(leaf);
3894 struct btrfs_key found_key;
3895 static u64 xattr_access = 0;
3896 static u64 xattr_default = 0;
3897 int scanned = 0;
3898
3899 if (!xattr_access) {
3900 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3901 strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3902 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3903 strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3904 }
3905
3906 slot++;
3907 *first_xattr_slot = -1;
3908 while (slot < nritems) {
3909 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3910
3911 /* We found a different objectid, there must be no ACLs. */
3912 if (found_key.objectid != objectid)
3913 return false;
3914
3915 /* We found an xattr, assume we've got an ACL. */
3916 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3917 if (*first_xattr_slot == -1)
3918 *first_xattr_slot = slot;
3919 if (found_key.offset == xattr_access ||
3920 found_key.offset == xattr_default)
3921 return true;
3922 }
3923
3924 /*
3925 * We found a key greater than an xattr key, there can't be any
3926 * ACLs later on.
3927 */
3928 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3929 return false;
3930
3931 slot++;
3932 scanned++;
3933
3934 /*
3935 * The item order goes like:
3936 * - inode
3937 * - inode backrefs
3938 * - xattrs
3939 * - extents
3940 *
3941 * so if there are lots of hard links to an inode there can be
3942 * a lot of backrefs. Don't waste time searching too hard,
3943 * this is just an optimization.
3944 */
3945 if (scanned >= 8)
3946 break;
3947 }
3948 /*
3949 * We hit the end of the leaf before we found an xattr or something
3950 * larger than an xattr. We have to assume the inode has ACLs.
3951 */
3952 if (*first_xattr_slot == -1)
3953 *first_xattr_slot = slot;
3954 return true;
3955 }
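/*
 * A worked example of the scan above (the item layout is illustrative):
 * for a leaf holding
 *
 *	(ino INODE_ITEM) (ino INODE_REF) (ino XATTR_ITEM) (ino EXTENT_DATA)
 *
 * the scan starts at the INODE_REF slot, records the XATTR_ITEM slot in
 * *first_xattr_slot and returns true only if that xattr's key offset equals
 * the name hash of one of the two POSIX ACL xattrs; reaching EXTENT_DATA (a
 * key type greater than BTRFS_XATTR_ITEM_KEY) ends the scan with "no ACLs".
 */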
3956
3957 static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
3958 {
3959 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3960
3961 if (WARN_ON_ONCE(inode->file_extent_tree))
3962 return 0;
3963 if (btrfs_fs_incompat(fs_info, NO_HOLES))
3964 return 0;
3965 if (!S_ISREG(inode->vfs_inode.i_mode))
3966 return 0;
3967 if (btrfs_is_free_space_inode(inode))
3968 return 0;
3969
3970 inode->file_extent_tree = kmalloc_obj(struct extent_io_tree);
3971 if (!inode->file_extent_tree)
3972 return -ENOMEM;
3973
3974 btrfs_extent_io_tree_init(fs_info, inode->file_extent_tree,
3975 IO_TREE_INODE_FILE_EXTENT);
3976 /* Lockdep class is set only for the file extent tree. */
3977 lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);
3978
3979 return 0;
3980 }
3981
3982 static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
3983 {
3984 struct btrfs_root *root = inode->root;
3985 struct btrfs_inode *existing;
3986 const u64 ino = btrfs_ino(inode);
3987 int ret;
3988
3989 if (inode_unhashed(&inode->vfs_inode))
3990 return 0;
3991
3992 if (prealloc) {
3993 ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
3994 if (ret)
3995 return ret;
3996 }
3997
3998 existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
3999
4000 if (xa_is_err(existing)) {
4001 ret = xa_err(existing);
4002 ASSERT(ret != -EINVAL);
4003 ASSERT(ret != -ENOMEM);
4004 return ret;
4005 } else if (existing) {
4006 WARN_ON(!(inode_state_read_once(&existing->vfs_inode) & (I_WILL_FREE | I_FREEING)));
4007 }
4008
4009 return 0;
4010 }
4011
4012 /*
4013 * Read a locked inode from the btree into the in-memory inode and add it to
4014 * its root list/tree.
4015 *
4016 * On failure clean up the inode.
4017 */
4018 static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path *path)
4019 {
4020 struct btrfs_root *root = inode->root;
4021 struct btrfs_fs_info *fs_info = root->fs_info;
4022 struct extent_buffer *leaf;
4023 struct btrfs_inode_item *inode_item;
4024 struct inode *vfs_inode = &inode->vfs_inode;
4025 struct btrfs_key location;
4026 unsigned long ptr;
4027 int maybe_acls;
4028 u32 rdev;
4029 int ret;
4030 bool filled = false;
4031 int first_xattr_slot;
4032
4033 ret = btrfs_fill_inode(inode, &rdev);
4034 if (!ret)
4035 filled = true;
4036
4037 ASSERT(path);
4038
4039 btrfs_get_inode_key(inode, &location);
4040
4041 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
4042 if (ret) {
4043 /*
4044 * ret > 0 can come from btrfs_search_slot called by
4045 * btrfs_lookup_inode(); it means the inode was not found.
4046 */
4047 if (ret > 0)
4048 ret = -ENOENT;
4049 goto out;
4050 }
4051
4052 leaf = path->nodes[0];
4053
4054 if (filled)
4055 goto cache_index;
4056
4057 inode_item = btrfs_item_ptr(leaf, path->slots[0],
4058 struct btrfs_inode_item);
4059 vfs_inode->i_mode = btrfs_inode_mode(leaf, inode_item);
4060 set_nlink(vfs_inode, btrfs_inode_nlink(leaf, inode_item));
4061 i_uid_write(vfs_inode, btrfs_inode_uid(leaf, inode_item));
4062 i_gid_write(vfs_inode, btrfs_inode_gid(leaf, inode_item));
4063 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
4064
4065 inode_set_atime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->atime),
4066 btrfs_timespec_nsec(leaf, &inode_item->atime));
4067
4068 inode_set_mtime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
4069 btrfs_timespec_nsec(leaf, &inode_item->mtime));
4070
4071 inode_set_ctime(vfs_inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
4072 btrfs_timespec_nsec(leaf, &inode_item->ctime));
4073
4074 inode->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
4075 inode->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
4076
4077 inode_set_bytes(vfs_inode, btrfs_inode_nbytes(leaf, inode_item));
4078 inode->generation = btrfs_inode_generation(leaf, inode_item);
4079 inode->last_trans = btrfs_inode_transid(leaf, inode_item);
4080
4081 inode_set_iversion_queried(vfs_inode, btrfs_inode_sequence(leaf, inode_item));
4082 vfs_inode->i_generation = inode->generation;
4083 vfs_inode->i_rdev = 0;
4084 rdev = btrfs_inode_rdev(leaf, inode_item);
4085
4086 if (S_ISDIR(vfs_inode->i_mode))
4087 inode->index_cnt = (u64)-1;
4088
4089 btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
4090 &inode->flags, &inode->ro_flags);
4091 btrfs_update_inode_mapping_flags(inode);
4092 btrfs_set_inode_mapping_order(inode);
4093
4094 cache_index:
4095 /*
4096 * If we were modified in the current generation and evicted from memory
4097 * and then re-read we need to do a full sync since we don't have any
4098 * idea about which extents were modified before we were evicted from
4099 * cache.
4100 *
4101 * This is required for both inode re-read from disk and delayed inode
4102 * in the delayed_nodes xarray.
4103 */
4104 if (inode->last_trans == btrfs_get_fs_generation(fs_info))
4105 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
4106
4107 /*
4108 * We don't persist the id of the transaction where an unlink operation
4109 * against the inode was last made. So here we assume the inode might
4110 * have been evicted, and therefore the exact value of last_unlink_trans
4111 * lost, and set it to last_trans to avoid metadata inconsistencies
4112 * between the inode and its parent if the inode is fsync'ed and the log
4113 * replayed. For example, in the scenario:
4114 *
4115 * touch mydir/foo
4116 * ln mydir/foo mydir/bar
4117 * sync
4118 * unlink mydir/bar
4119 * echo 2 > /proc/sys/vm/drop_caches # evicts inode
4120 * xfs_io -c fsync mydir/foo
4121 * <power failure>
4122 * mount fs, triggers fsync log replay
4123 *
4124 * We must make sure that when we fsync our inode foo we also log its
4125 * parent inode, otherwise after log replay the parent still has the
4126 * dentry with the "bar" name but our inode foo has a link count of 1
4127 * and doesn't have an inode ref with the name "bar" anymore.
4128 *
4129 * Setting last_unlink_trans to last_trans is a pessimistic approach,
4130 * but it guarantees correctness at the expense of occasional full
4131 * transaction commits on fsync if our inode is a directory, or if our
4132 * inode is not a directory, logging its parent unnecessarily.
4133 */
4134 inode->last_unlink_trans = inode->last_trans;
4135
4136 /*
4137 * Same logic as for last_unlink_trans. We don't persist the generation
4138 * of the last transaction where this inode was used for a reflink
4139 * operation, so after eviction and reloading the inode we must be
4140 * pessimistic and assume the last transaction that modified the inode.
4141 */
4142 inode->last_reflink_trans = inode->last_trans;
4143
4144 path->slots[0]++;
4145 if (vfs_inode->i_nlink != 1 ||
4146 path->slots[0] >= btrfs_header_nritems(leaf))
4147 goto cache_acl;
4148
4149 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
4150 if (location.objectid != btrfs_ino(inode))
4151 goto cache_acl;
4152
4153 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4154 if (location.type == BTRFS_INODE_REF_KEY) {
4155 struct btrfs_inode_ref *ref;
4156
4157 ref = (struct btrfs_inode_ref *)ptr;
4158 inode->dir_index = btrfs_inode_ref_index(leaf, ref);
4159 } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
4160 struct btrfs_inode_extref *extref;
4161
4162 extref = (struct btrfs_inode_extref *)ptr;
4163 inode->dir_index = btrfs_inode_extref_index(leaf, extref);
4164 }
4165 cache_acl:
4166 /*
4167 * Try to precache a NULL ACL entry for files that don't have
4168 * any xattrs or ACLs.
4169 */
4170 maybe_acls = acls_after_inode_item(leaf, path->slots[0],
4171 btrfs_ino(inode), &first_xattr_slot);
4172 if (first_xattr_slot != -1) {
4173 path->slots[0] = first_xattr_slot;
4174 ret = btrfs_load_inode_props(inode, path);
4175 if (ret)
4176 btrfs_err(fs_info,
4177 "error loading props for ino %llu (root %llu): %d",
4178 btrfs_ino(inode), btrfs_root_id(root), ret);
4179 }
4180
4181 /*
4182 * We don't need the path anymore, so release it to avoid holding a read
4183 * lock on a leaf while calling btrfs_init_file_extent_tree(), which can
4184 * allocate memory that triggers reclaim (GFP_KERNEL) and cause a locking
4185 * dependency.
4186 */
4187 btrfs_release_path(path);
4188
4189 ret = btrfs_init_file_extent_tree(inode);
4190 if (ret)
4191 goto out;
4192 btrfs_inode_set_file_extent_range(inode, 0,
4193 round_up(i_size_read(vfs_inode), fs_info->sectorsize));
4194
4195 if (!maybe_acls)
4196 cache_no_acl(vfs_inode);
4197
4198 switch (vfs_inode->i_mode & S_IFMT) {
4199 case S_IFREG:
4200 vfs_inode->i_mapping->a_ops = &btrfs_aops;
4201 vfs_inode->i_fop = &btrfs_file_operations;
4202 vfs_inode->i_op = &btrfs_file_inode_operations;
4203 break;
4204 case S_IFDIR:
4205 vfs_inode->i_fop = &btrfs_dir_file_operations;
4206 vfs_inode->i_op = &btrfs_dir_inode_operations;
4207 break;
4208 case S_IFLNK:
4209 vfs_inode->i_op = &btrfs_symlink_inode_operations;
4210 inode_nohighmem(vfs_inode);
4211 vfs_inode->i_mapping->a_ops = &btrfs_aops;
4212 break;
4213 default:
4214 vfs_inode->i_op = &btrfs_special_inode_operations;
4215 init_special_inode(vfs_inode, vfs_inode->i_mode, rdev);
4216 break;
4217 }
4218
4219 btrfs_sync_inode_flags_to_i_flags(inode);
4220
4221 ret = btrfs_add_inode_to_root(inode, true);
4222 if (ret)
4223 goto out;
4224
4225 return 0;
4226 out:
4227 /*
4228 * We may have a read locked leaf and iget_failed() triggers inode
4229 * eviction which needs to release the delayed inode and that needs
4230 * to lock the delayed inode's mutex. This can cause an ABBA deadlock
4231 * with a task running delayed items, as that requires first locking
4232 * the delayed inode's mutex and then modifying its subvolume btree.
4233 * So release the path before iget_failed().
4234 */
4235 btrfs_release_path(path);
4236 iget_failed(vfs_inode);
4237 return ret;
4238 }
4239
4240 /*
4241 * given a leaf and an inode, copy the inode fields into the leaf
4242 */
4243 static void fill_inode_item(struct btrfs_trans_handle *trans,
4244 struct extent_buffer *leaf,
4245 struct btrfs_inode_item *item,
4246 struct inode *inode)
4247 {
4248 u64 flags;
4249
4250 btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
4251 btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
4252 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
4253 btrfs_set_inode_mode(leaf, item, inode->i_mode);
4254 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
4255
4256 btrfs_set_timespec_sec(leaf, &item->atime, inode_get_atime_sec(inode));
4257 btrfs_set_timespec_nsec(leaf, &item->atime, inode_get_atime_nsec(inode));
4258
4259 btrfs_set_timespec_sec(leaf, &item->mtime, inode_get_mtime_sec(inode));
4260 btrfs_set_timespec_nsec(leaf, &item->mtime, inode_get_mtime_nsec(inode));
4261
4262 btrfs_set_timespec_sec(leaf, &item->ctime, inode_get_ctime_sec(inode));
4263 btrfs_set_timespec_nsec(leaf, &item->ctime, inode_get_ctime_nsec(inode));
4264
4265 btrfs_set_timespec_sec(leaf, &item->otime, BTRFS_I(inode)->i_otime_sec);
4266 btrfs_set_timespec_nsec(leaf, &item->otime, BTRFS_I(inode)->i_otime_nsec);
4267
4268 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
4269 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
4270 btrfs_set_inode_sequence(leaf, item, inode_peek_iversion(inode));
4271 btrfs_set_inode_transid(leaf, item, trans->transid);
4272 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
4273 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4274 BTRFS_I(inode)->ro_flags);
4275 btrfs_set_inode_flags(leaf, item, flags);
4276 btrfs_set_inode_block_group(leaf, item, 0);
4277 }
4278
4279 /*
4280 * copy everything in the in-memory inode into the btree.
4281 */
4282 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
4283 struct btrfs_inode *inode)
4284 {
4285 struct btrfs_inode_item *inode_item;
4286 BTRFS_PATH_AUTO_FREE(path);
4287 struct extent_buffer *leaf;
4288 struct btrfs_key key;
4289 int ret;
4290
4291 path = btrfs_alloc_path();
4292 if (!path)
4293 return -ENOMEM;
4294
4295 btrfs_get_inode_key(inode, &key);
4296 ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
4297 if (ret) {
4298 if (ret > 0)
4299 ret = -ENOENT;
4300 return ret;
4301 }
4302
4303 leaf = path->nodes[0];
4304 inode_item = btrfs_item_ptr(leaf, path->slots[0],
4305 struct btrfs_inode_item);
4306
4307 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
4308 btrfs_set_inode_last_trans(trans, inode);
4309 return 0;
4310 }
4311
4312 /*
4313 * copy everything in the in-memory inode into the btree.
4314 */
4315 int btrfs_update_inode(struct btrfs_trans_handle *trans,
4316 struct btrfs_inode *inode)
4317 {
4318 struct btrfs_root *root = inode->root;
4319 struct btrfs_fs_info *fs_info = root->fs_info;
4320 int ret;
4321
4322 /*
4323 * If the inode is a free space inode, we can deadlock during commit
4324 * if we put it into the delayed code.
4325 *
4326 * The data relocation inode should also be directly updated
4327 * without delay
4328 */
4329 if (!btrfs_is_free_space_inode(inode)
4330 && !btrfs_is_data_reloc_root(root)
4331 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
4332 btrfs_update_root_times(trans, root);
4333
4334 ret = btrfs_delayed_update_inode(trans, inode);
4335 if (!ret)
4336 btrfs_set_inode_last_trans(trans, inode);
4337 return ret;
4338 }
4339
4340 return btrfs_update_inode_item(trans, inode);
4341 }
4342
4343 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
4344 struct btrfs_inode *inode)
4345 {
4346 int ret;
4347
4348 ret = btrfs_update_inode(trans, inode);
4349 if (ret == -ENOSPC)
4350 return btrfs_update_inode_item(trans, inode);
4351 return ret;
4352 }
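/*
 * A sketch of why the fallback exists: the delayed path taken by
 * btrfs_update_inode() can fail with -ENOSPC when reserving metadata for
 * the delayed node, while btrfs_update_inode_item() rewrites the existing
 * item in place. Callers that must make progress, like the ordered extent
 * completion above, therefore use:
 *
 *	ret = btrfs_update_inode_fallback(trans, inode);
 */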
4353
4354 static void update_time_after_link_or_unlink(struct btrfs_inode *dir)
4355 {
4356 struct timespec64 now;
4357
4358 /*
4359 * If we are replaying a log tree, we do not want to update the mtime
4360 * and ctime of the parent directory with the current time, since the
4361 * log replay procedure is responsible for setting them to their correct
4362 * values (the ones it had when the fsync was done).
4363 */
4364 if (test_bit(BTRFS_FS_LOG_RECOVERING, &dir->root->fs_info->flags))
4365 return;
4366
4367 now = inode_set_ctime_current(&dir->vfs_inode);
4368 inode_set_mtime_to_ts(&dir->vfs_inode, now);
4369 }
4370
4371 /*
4372 * unlink helper that gets used here in inode.c and in the tree logging
4373 * recovery code. It removes a link in a directory with a given name, and
4374 * also drops the back refs in the inode to the directory
4375 */
4376 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4377 struct btrfs_inode *dir,
4378 struct btrfs_inode *inode,
4379 const struct fscrypt_str *name,
4380 struct btrfs_rename_ctx *rename_ctx)
4381 {
4382 struct btrfs_root *root = dir->root;
4383 struct btrfs_fs_info *fs_info = root->fs_info;
4384 struct btrfs_path *path;
4385 int ret = 0;
4386 struct btrfs_dir_item *di;
4387 u64 index;
4388 u64 ino = btrfs_ino(inode);
4389 u64 dir_ino = btrfs_ino(dir);
4390
4391 path = btrfs_alloc_path();
4392 if (!path)
4393 return -ENOMEM;
4394
4395 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
4396 if (IS_ERR_OR_NULL(di)) {
4397 btrfs_free_path(path);
4398 return di ? PTR_ERR(di) : -ENOENT;
4399 }
4400 ret = btrfs_delete_one_dir_name(trans, root, path, di);
4401 /*
4402 * Down the call chains below we'll also need to allocate a path, so no
4403 * need to hold on to this one for longer than necessary.
4404 */
4405 btrfs_free_path(path);
4406 if (ret)
4407 return ret;
4408
4409 /*
4410 * If we don't have a dir index, we have to get it by looking up
4411 * the inode ref; and since we have then already found the inode
4412 * ref, remove it directly instead of doing a delayed deletion.
4413 *
4414 * But if we do have a dir index, there is no need to search for
4415 * the inode ref. Since the inode ref is close to the inode item,
4416 * it is better to delete it later, when we update the inode item.
4418 */
4419 if (inode->dir_index) {
4420 ret = btrfs_delayed_delete_inode_ref(inode);
4421 if (!ret) {
4422 index = inode->dir_index;
4423 goto skip_backref;
4424 }
4425 }
4426
4427 ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
4428 if (unlikely(ret)) {
4429 btrfs_crit(fs_info,
4430 "failed to delete reference to %.*s, root %llu inode %llu parent %llu",
4431 name->len, name->name, btrfs_root_id(root), ino, dir_ino);
4432 btrfs_abort_transaction(trans, ret);
4433 return ret;
4434 }
4435 skip_backref:
4436 if (rename_ctx)
4437 rename_ctx->index = index;
4438
4439 ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4440 if (unlikely(ret)) {
4441 btrfs_abort_transaction(trans, ret);
4442 return ret;
4443 }
4444
4445 /*
4446 * If we are in a rename context, we don't need to update anything in the
4447 * log. That will be done later during the rename by btrfs_log_new_name().
4448 * Besides that, doing it here would only cause extra unnecessary btree
4449 * operations on the log tree, increasing latency for applications.
4450 */
4451 if (!rename_ctx) {
4452 btrfs_del_inode_ref_in_log(trans, name, inode, dir);
4453 btrfs_del_dir_entries_in_log(trans, name, dir, index);
4454 }
4455
4456 /*
4457 * If we have a pending delayed iput we could end up with the final iput
4458 * being run in btrfs-cleaner context. If we have enough of these built
4459 * up we can end up burning a lot of time in btrfs-cleaner without any
4460 * way to throttle the unlinks. Since we're currently holding a ref on
4461 * the inode we can run the delayed iput here without any issues as the
4462 * final iput won't be done until after we drop the ref we're currently
4463 * holding.
4464 */
4465 btrfs_run_delayed_iput(fs_info, inode);
4466
4467 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
4468 inode_inc_iversion(&inode->vfs_inode);
4469 inode_set_ctime_current(&inode->vfs_inode);
4470 inode_inc_iversion(&dir->vfs_inode);
4471 update_time_after_link_or_unlink(dir);
4472
4473 return btrfs_update_inode(trans, dir);
4474 }
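/*
 * On the directory i_size update above: btrfs counts each name twice in the
 * dir's i_size, once for the DIR_ITEM and once for the DIR_INDEX item, so
 * unlinking "foo" (name->len == 3) shrinks the directory by 6 bytes:
 *
 *	btrfs_i_size_write(dir, dir->vfs_inode.i_size - 3 * 2);
 */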
4475
4476 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4477 struct btrfs_inode *dir, struct btrfs_inode *inode,
4478 const struct fscrypt_str *name)
4479 {
4480 int ret;
4481
4482 ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
4483 if (!ret) {
4484 drop_nlink(&inode->vfs_inode);
4485 ret = btrfs_update_inode(trans, inode);
4486 }
4487 return ret;
4488 }
4489
4490 /*
4491 * helper to start transaction for unlink and rmdir.
4492 *
4493 * unlink and rmdir are special in btrfs, they do not always free space, so
4494 * if we cannot make our reservations the normal way try and see if there is
4495 * plenty of slack room in the global reserve to migrate, otherwise we cannot
4496 * allow the unlink to occur.
4497 */
4498 static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
4499 {
4500 struct btrfs_root *root = dir->root;
4501
4502 return btrfs_start_transaction_fallback_global_rsv(root,
4503 BTRFS_UNLINK_METADATA_UNITS);
4504 }
4505
4506 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4507 {
4508 struct btrfs_trans_handle *trans;
4509 struct inode *inode = d_inode(dentry);
4510 int ret;
4511 struct fscrypt_name fname;
4512
4513 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
4514 if (ret)
4515 return ret;
4516
4517 /* This needs to handle no-key deletions later on */
4518
4519 trans = __unlink_start_trans(BTRFS_I(dir));
4520 if (IS_ERR(trans)) {
4521 ret = PTR_ERR(trans);
4522 goto fscrypt_free;
4523 }
4524
4525 btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4526 false);
4527
4528 ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
4529 &fname.disk_name);
4530 if (ret)
4531 goto end_trans;
4532
4533 if (inode->i_nlink == 0) {
4534 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
4535 if (ret)
4536 goto end_trans;
4537 }
4538
4539 end_trans:
4540 btrfs_end_transaction(trans);
4541 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
4542 fscrypt_free:
4543 fscrypt_free_filename(&fname);
4544 return ret;
4545 }
4546
4547 static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4548 struct btrfs_inode *dir, struct dentry *dentry)
4549 {
4550 struct btrfs_root *root = dir->root;
4551 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4552 BTRFS_PATH_AUTO_FREE(path);
4553 struct extent_buffer *leaf;
4554 struct btrfs_dir_item *di;
4555 struct btrfs_key key;
4556 u64 index;
4557 int ret;
4558 u64 objectid;
4559 u64 dir_ino = btrfs_ino(dir);
4560 struct fscrypt_name fname;
4561
4562 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
4563 if (ret)
4564 return ret;
4565
4566 /* This needs to handle no-key deletions later on */
4567
4568 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4569 objectid = btrfs_root_id(inode->root);
4570 } else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4571 objectid = inode->ref_root_id;
4572 } else {
4573 WARN_ON(1);
4574 fscrypt_free_filename(&fname);
4575 return -EINVAL;
4576 }
4577
4578 path = btrfs_alloc_path();
4579 if (!path) {
4580 ret = -ENOMEM;
4581 goto out;
4582 }
4583
4584 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4585 &fname.disk_name, -1);
4586 if (IS_ERR_OR_NULL(di)) {
4587 ret = di ? PTR_ERR(di) : -ENOENT;
4588 goto out;
4589 }
4590
4591 leaf = path->nodes[0];
4592 btrfs_dir_item_key_to_cpu(leaf, di, &key);
4593 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4594 ret = btrfs_delete_one_dir_name(trans, root, path, di);
4595 if (unlikely(ret)) {
4596 btrfs_abort_transaction(trans, ret);
4597 goto out;
4598 }
4599 btrfs_release_path(path);
4600
4601 /*
4602 * This is a placeholder inode for a subvolume we didn't have a
4603 * reference to at the time of the snapshot creation. In the meantime
4604 * we could have renamed the real subvol link into our snapshot, so
4605 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
4606 * Instead simply look up the dir_index_item for this entry so we can
4607 * remove it. Otherwise we know we have a ref to the root and we can
4608 * call btrfs_del_root_ref, and it _shouldn't_ fail.
4609 */
4610 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
4611 di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
4612 if (IS_ERR(di)) {
4613 ret = PTR_ERR(di);
4614 btrfs_abort_transaction(trans, ret);
4615 goto out;
4616 }
4617
4618 leaf = path->nodes[0];
4619 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4620 index = key.offset;
4621 btrfs_release_path(path);
4622 } else {
4623 ret = btrfs_del_root_ref(trans, objectid,
4624 btrfs_root_id(root), dir_ino,
4625 &index, &fname.disk_name);
4626 if (unlikely(ret)) {
4627 btrfs_abort_transaction(trans, ret);
4628 goto out;
4629 }
4630 }
4631
4632 ret = btrfs_delete_delayed_dir_index(trans, dir, index);
4633 if (unlikely(ret)) {
4634 btrfs_abort_transaction(trans, ret);
4635 goto out;
4636 }
4637
4638 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
4639 inode_inc_iversion(&dir->vfs_inode);
4640 inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
4641 ret = btrfs_update_inode_fallback(trans, dir);
4642 if (ret)
4643 btrfs_abort_transaction(trans, ret);
4644 out:
4645 fscrypt_free_filename(&fname);
4646 return ret;
4647 }
4648
4649 /*
4650 * Helper to check if the subvolume references other subvolumes or if it's
4651 * the default subvolume.
4652 */
4653 static noinline int may_destroy_subvol(struct btrfs_root *root)
4654 {
4655 struct btrfs_fs_info *fs_info = root->fs_info;
4656 BTRFS_PATH_AUTO_FREE(path);
4657 struct btrfs_dir_item *di;
4658 struct btrfs_key key;
4659 struct fscrypt_str name = FSTR_INIT("default", 7);
4660 u64 dir_id;
4661 int ret;
4662
4663 path = btrfs_alloc_path();
4664 if (!path)
4665 return -ENOMEM;
4666
4667 /* Make sure this root isn't set as the default subvol */
4668 dir_id = btrfs_super_root_dir(fs_info->super_copy);
4669 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
4670 dir_id, &name, 0);
4671 if (di && !IS_ERR(di)) {
4672 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
4673 if (key.objectid == btrfs_root_id(root)) {
4674 ret = -EPERM;
4675 btrfs_err(fs_info,
4676 "deleting default subvolume %llu is not allowed",
4677 key.objectid);
4678 return ret;
4679 }
4680 btrfs_release_path(path);
4681 }
4682
4683 key.objectid = btrfs_root_id(root);
4684 key.type = BTRFS_ROOT_REF_KEY;
4685 key.offset = (u64)-1;
4686
4687 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4688 if (ret < 0)
4689 return ret;
4690 if (unlikely(ret == 0)) {
4691 /*
4692 * Key with offset -1 found, there would have to exist a root
4693 * with such id, but this is out of valid range.
4694 */
4695 return -EUCLEAN;
4696 }
4697
4698 ret = 0;
4699 if (path->slots[0] > 0) {
4700 path->slots[0]--;
4701 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4702 if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
4703 ret = -ENOTEMPTY;
4704 }
4705
4706 return ret;
4707 }
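/*
 * A worked example of the ref check above (root ids are illustrative): a
 * search for (256, BTRFS_ROOT_REF_KEY, (u64)-1) can't find an exact match,
 * so btrfs_search_slot() returns 1 positioned right after the last key
 * smaller than ours. Stepping back one slot lands on the last ROOT_REF of
 * root 256, if one exists, e.g. for a child subvolume 257:
 *
 *	(256, BTRFS_ROOT_REF_KEY, 257)
 *
 * and finding such a key means the subvolume still references another one,
 * hence -ENOTEMPTY.
 */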
4708
4709 /* Delete all dentries for inodes belonging to the root */
4710 static void btrfs_prune_dentries(struct btrfs_root *root)
4711 {
4712 struct btrfs_fs_info *fs_info = root->fs_info;
4713 struct btrfs_inode *inode;
4714 u64 min_ino = 0;
4715
4716 if (!BTRFS_FS_ERROR(fs_info))
4717 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
4718
4719 inode = btrfs_find_first_inode(root, min_ino);
4720 while (inode) {
4721 if (icount_read(&inode->vfs_inode) > 1)
4722 d_prune_aliases(&inode->vfs_inode);
4723
4724 min_ino = btrfs_ino(inode) + 1;
4725 /*
4726 * btrfs_drop_inode() will have it removed from the inode
4727 * cache when its usage count hits zero.
4728 */
4729 iput(&inode->vfs_inode);
4730 cond_resched();
4731 inode = btrfs_find_first_inode(root, min_ino);
4732 }
4733 }
4734
4735 int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
4736 {
4737 struct btrfs_root *root = dir->root;
4738 struct btrfs_fs_info *fs_info = root->fs_info;
4739 struct inode *inode = d_inode(dentry);
4740 struct btrfs_root *dest = BTRFS_I(inode)->root;
4741 struct btrfs_trans_handle *trans;
4742 struct btrfs_block_rsv block_rsv;
4743 u64 root_flags;
4744 u64 qgroup_reserved = 0;
4745 int ret;
4746
4747 down_write(&fs_info->subvol_sem);
4748
4749 /*
4750 * Don't allow deleting a subvolume while a send is in progress. This is
4751 * inside the inode lock so the error handling that has to drop the bit
4752 * again is not run concurrently.
4753 */
4754 spin_lock(&dest->root_item_lock);
4755 if (dest->send_in_progress) {
4756 spin_unlock(&dest->root_item_lock);
4757 btrfs_warn(fs_info,
4758 "attempt to delete subvolume %llu during send",
4759 btrfs_root_id(dest));
4760 ret = -EPERM;
4761 goto out_up_write;
4762 }
4763 if (atomic_read(&dest->nr_swapfiles)) {
4764 spin_unlock(&dest->root_item_lock);
4765 btrfs_warn(fs_info,
4766 "attempt to delete subvolume %llu with active swapfile",
4767 btrfs_root_id(root));
4768 ret = -EPERM;
4769 goto out_up_write;
4770 }
4771 root_flags = btrfs_root_flags(&dest->root_item);
4772 btrfs_set_root_flags(&dest->root_item,
4773 root_flags | BTRFS_ROOT_SUBVOL_DEAD);
4774 spin_unlock(&dest->root_item_lock);
4775
4776 ret = may_destroy_subvol(dest);
4777 if (ret)
4778 goto out_undead;
4779
4780 btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
4781 /*
4782 * One for dir inode,
4783 * two for dir entries,
4784 * two for root ref/backref.
4785 */
4786 ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
4787 if (ret)
4788 goto out_undead;
4789 qgroup_reserved = block_rsv.qgroup_rsv_reserved;
4790
4791 trans = btrfs_start_transaction(root, 0);
4792 if (IS_ERR(trans)) {
4793 ret = PTR_ERR(trans);
4794 goto out_release;
4795 }
4796 btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
4797 qgroup_reserved = 0;
4798 trans->block_rsv = &block_rsv;
4799 trans->bytes_reserved = block_rsv.size;
4800
4801 btrfs_record_snapshot_destroy(trans, dir);
4802
4803 ret = btrfs_unlink_subvol(trans, dir, dentry);
4804 if (unlikely(ret)) {
4805 btrfs_abort_transaction(trans, ret);
4806 goto out_end_trans;
4807 }
4808
4809 ret = btrfs_record_root_in_trans(trans, dest);
4810 if (unlikely(ret)) {
4811 btrfs_abort_transaction(trans, ret);
4812 goto out_end_trans;
4813 }
4814
4815 memset(&dest->root_item.drop_progress, 0,
4816 sizeof(dest->root_item.drop_progress));
4817 btrfs_set_root_drop_level(&dest->root_item, 0);
4818 btrfs_set_root_refs(&dest->root_item, 0);
4819
4820 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
4821 ret = btrfs_insert_orphan_item(trans,
4822 fs_info->tree_root,
4823 btrfs_root_id(dest));
4824 if (unlikely(ret)) {
4825 btrfs_abort_transaction(trans, ret);
4826 goto out_end_trans;
4827 }
4828 }
4829
4830 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
4831 BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
4832 if (unlikely(ret && ret != -ENOENT)) {
4833 btrfs_abort_transaction(trans, ret);
4834 goto out_end_trans;
4835 }
4836 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
4837 ret = btrfs_uuid_tree_remove(trans,
4838 dest->root_item.received_uuid,
4839 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4840 btrfs_root_id(dest));
4841 if (unlikely(ret && ret != -ENOENT)) {
4842 btrfs_abort_transaction(trans, ret);
4843 goto out_end_trans;
4844 }
4845 }
4846
4847 free_anon_bdev(dest->anon_dev);
4848 dest->anon_dev = 0;
4849 out_end_trans:
4850 trans->block_rsv = NULL;
4851 trans->bytes_reserved = 0;
4852 ret = btrfs_end_transaction(trans);
4853 inode->i_flags |= S_DEAD;
4854 out_release:
4855 btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
4856 if (qgroup_reserved)
4857 btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
4858 out_undead:
4859 if (ret) {
4860 spin_lock(&dest->root_item_lock);
4861 root_flags = btrfs_root_flags(&dest->root_item);
4862 btrfs_set_root_flags(&dest->root_item,
4863 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
4864 spin_unlock(&dest->root_item_lock);
4865 }
4866 out_up_write:
4867 up_write(&fs_info->subvol_sem);
4868 if (!ret) {
4869 d_invalidate(dentry);
4870 btrfs_prune_dentries(dest);
4871 ASSERT(dest->send_in_progress == 0);
4872 }
4873
4874 return ret;
4875 }
4876
4877 static int btrfs_rmdir(struct inode *vfs_dir, struct dentry *dentry)
4878 {
4879 struct btrfs_inode *dir = BTRFS_I(vfs_dir);
4880 struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
4881 struct btrfs_fs_info *fs_info = inode->root->fs_info;
4882 int ret = 0;
4883 struct btrfs_trans_handle *trans;
4884 struct fscrypt_name fname;
4885
4886 if (inode->vfs_inode.i_size > BTRFS_EMPTY_DIR_SIZE)
4887 return -ENOTEMPTY;
4888 if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
4889 if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
4890 btrfs_err(fs_info,
4891 "extent tree v2 doesn't support snapshot deletion yet");
4892 return -EOPNOTSUPP;
4893 }
4894 return btrfs_delete_subvolume(dir, dentry);
4895 }
4896
4897 ret = fscrypt_setup_filename(vfs_dir, &dentry->d_name, 1, &fname);
4898 if (ret)
4899 return ret;
4900
4901 /* This needs to handle no-key deletions later on */
4902
4903 trans = __unlink_start_trans(dir);
4904 if (IS_ERR(trans)) {
4905 ret = PTR_ERR(trans);
4906 goto out_notrans;
4907 }
4908
4909 /*
4910 * Propagate the last_unlink_trans value of the deleted dir to its
4911 * parent directory. This is to prevent an unrecoverable log tree in the
4912 * case we do something like this:
4913 * 1) create dir foo
4914 * 2) create snapshot under dir foo
4915 * 3) delete the snapshot
4916 * 4) rmdir foo
4917 * 5) mkdir foo
4918 * 6) fsync foo or some file inside foo
4919 *
4920 * This is because we can't unlink other roots when replaying the dir
4921 * deletes for directory foo.
4922 */
4923 if (inode->last_unlink_trans >= trans->transid)
4924 btrfs_record_snapshot_destroy(trans, dir);
4925
4926 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4927 ret = btrfs_unlink_subvol(trans, dir, dentry);
4928 goto out;
4929 }
4930
4931 ret = btrfs_orphan_add(trans, inode);
4932 if (ret)
4933 goto out;
4934
4935 /* now the directory is empty */
4936 ret = btrfs_unlink_inode(trans, dir, inode, &fname.disk_name);
4937 if (!ret)
4938 btrfs_i_size_write(inode, 0);
4939 out:
4940 btrfs_end_transaction(trans);
4941 out_notrans:
4942 btrfs_btree_balance_dirty(fs_info);
4943 fscrypt_free_filename(&fname);
4944
4945 return ret;
4946 }
4947
4948 static bool is_inside_block(u64 bytenr, u64 blockstart, u32 blocksize)
4949 {
4950 ASSERT(IS_ALIGNED(blockstart, blocksize), "blockstart=%llu blocksize=%u",
4951 blockstart, blocksize);
4952
4953 if (blockstart <= bytenr && bytenr <= blockstart + blocksize - 1)
4954 return true;
4955 return false;
4956 }
4957
4958 static int truncate_block_zero_beyond_eof(struct btrfs_inode *inode, u64 start)
4959 {
4960 const pgoff_t index = (start >> PAGE_SHIFT);
4961 struct address_space *mapping = inode->vfs_inode.i_mapping;
4962 struct folio *folio;
4963 u64 zero_start;
4964 u64 zero_end;
4965 int ret = 0;
4966
4967 again:
4968 folio = filemap_lock_folio(mapping, index);
4969 /* No folio present. */
4970 if (IS_ERR(folio))
4971 return 0;
4972
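/*
 * btrfs_read_folio() unlocks the folio once the read completes, so
 * re-take the lock and make sure the folio was not truncated or
 * removed from the mapping in the meantime.
 */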
4973 if (!folio_test_uptodate(folio)) {
4974 ret = btrfs_read_folio(NULL, folio);
4975 folio_lock(folio);
4976 if (folio->mapping != mapping) {
4977 folio_unlock(folio);
4978 folio_put(folio);
4979 goto again;
4980 }
4981 if (unlikely(!folio_test_uptodate(folio))) {
4982 ret = -EIO;
4983 goto out_unlock;
4984 }
4985 }
4986 folio_wait_writeback(folio);
4987
4988 /*
4989 * We do not need to lock extents nor wait for OE, as it's already
4990 * beyond EOF.
4991 */
4992
4993 zero_start = max_t(u64, folio_pos(folio), start);
4994 zero_end = folio_next_pos(folio);
4995 folio_zero_range(folio, zero_start - folio_pos(folio),
4996 zero_end - zero_start);
4997
4998 out_unlock:
4999 folio_unlock(folio);
5000 folio_put(folio);
5001 return ret;
5002 }
5003
5004 /*
5005 * Handle the truncation of a fs block.
5006 *
5007 * @inode - inode that we're zeroing
5008 * @offset - the file offset of the block to truncate
5009 * The value must be inside [@start, @end], and the function will do
5010 * extra checks if the block that covers @offset needs to be zeroed.
5011 * @start - the start file offset of the range we want to zero
5012 * @end - the end (inclusive) file offset of the range we want to zero.
5013 *
5014 * If the range is not block aligned, read out the folio that covers @offset,
5015 * and if needed zero blocks that are inside the folio and covered by [@start, @end].
5016 * If @start or @end + 1 lands inside a block, that block will be marked dirty
5017 * for writeback.
5018 *
5019 * This is utilized by hole punch, zero range, file expansion.
5020 */
5021 int btrfs_truncate_block(struct btrfs_inode *inode, u64 offset, u64 start, u64 end)
5022 {
5023 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5024 struct address_space *mapping = inode->vfs_inode.i_mapping;
5025 struct extent_io_tree *io_tree = &inode->io_tree;
5026 struct btrfs_ordered_extent *ordered;
5027 struct extent_state *cached_state = NULL;
5028 struct extent_changeset *data_reserved = NULL;
5029 bool only_release_metadata = false;
5030 u32 blocksize = fs_info->sectorsize;
5031 pgoff_t index = (offset >> PAGE_SHIFT);
5032 struct folio *folio;
5033 gfp_t mask = btrfs_alloc_write_mask(mapping);
5034 int ret = 0;
5035 const bool in_head_block = is_inside_block(offset, round_down(start, blocksize),
5036 blocksize);
5037 const bool in_tail_block = is_inside_block(offset, round_down(end, blocksize),
5038 blocksize);
5039 bool need_truncate_head = false;
5040 bool need_truncate_tail = false;
5041 u64 zero_start;
5042 u64 zero_end;
5043 u64 block_start;
5044 u64 block_end;
5045
5046 /* @offset should be inside the range. */
5047 ASSERT(start <= offset && offset <= end, "offset=%llu start=%llu end=%llu",
5048 offset, start, end);
5049
5050 /* The range is aligned at both ends. */
5051 if (IS_ALIGNED(start, blocksize) && IS_ALIGNED(end + 1, blocksize)) {
5052 /*
5053 * For the block size < page size case, we may have polluted blocks
5054 * beyond EOF. So we also need to zero them out.
5055 */
5056 if (end == (u64)-1 && blocksize < PAGE_SIZE)
5057 ret = truncate_block_zero_beyond_eof(inode, start);
5058 goto out;
5059 }
5060
5061 /*
5062 * @offset may be inside neither the head nor the tail block, in which case we
5063 * don't need to do anything.
5064 */
5065 if (!in_head_block && !in_tail_block)
5066 goto out;
5067
5068 /*
5069 * Skip the truncation if the range in the target block is already aligned.
5070 * The seemingly complex check will also handle the same block case.
5071 */
5072 if (in_head_block && !IS_ALIGNED(start, blocksize))
5073 need_truncate_head = true;
5074 if (in_tail_block && !IS_ALIGNED(end + 1, blocksize))
5075 need_truncate_tail = true;
5076 if (!need_truncate_head && !need_truncate_tail)
5077 goto out;
5078
5079 block_start = round_down(offset, blocksize);
5080 block_end = block_start + blocksize - 1;
5081
5082 ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
5083 blocksize, false);
5084 if (ret < 0) {
5085 size_t write_bytes = blocksize;
5086
5087 if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
5088 /* For nocow case, no need to reserve data space. */
5089 ASSERT(write_bytes == blocksize, "write_bytes=%zu blocksize=%u",
5090 write_bytes, blocksize);
5091 only_release_metadata = true;
5092 } else {
5093 goto out;
5094 }
5095 }
5096 ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
5097 if (ret < 0) {
5098 if (!only_release_metadata)
5099 btrfs_free_reserved_data_space(inode, data_reserved,
5100 block_start, blocksize);
5101 goto out;
5102 }
5103 again:
5104 folio = __filemap_get_folio(mapping, index,
5105 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
5106 if (IS_ERR(folio)) {
5107 if (only_release_metadata)
5108 btrfs_delalloc_release_metadata(inode, blocksize, true);
5109 else
5110 btrfs_delalloc_release_space(inode, data_reserved,
5111 block_start, blocksize, true);
5112 btrfs_delalloc_release_extents(inode, blocksize);
5113 ret = PTR_ERR(folio);
5114 goto out;
5115 }
5116
5117 if (!folio_test_uptodate(folio)) {
5118 ret = btrfs_read_folio(NULL, folio);
5119 folio_lock(folio);
5120 if (folio->mapping != mapping) {
5121 folio_unlock(folio);
5122 folio_put(folio);
5123 goto again;
5124 }
5125 if (unlikely(!folio_test_uptodate(folio))) {
5126 ret = -EIO;
5127 goto out_unlock;
5128 }
5129 }
5130
5131 /*
5132 * We unlock the page after the io is completed and then re-lock it
5133 * above. release_folio() could have come in between that and cleared
5134 * folio private, but left the page in the mapping. Set the page mapped
5135 * here to make sure it's properly set for the subpage stuff.
5136 */
5137 ret = set_folio_extent_mapped(folio);
5138 if (ret < 0)
5139 goto out_unlock;
5140
5141 folio_wait_writeback(folio);
5142
5143 btrfs_lock_extent(io_tree, block_start, block_end, &cached_state);
5144
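/*
 * An ordered extent covering this block means a previous write has not
 * fully finished. Drop all locks, wait for it to complete and retry,
 * so we never zero a block with writeback still in flight.
 */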
5145 ordered = btrfs_lookup_ordered_extent(inode, block_start);
5146 if (ordered) {
5147 btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5148 folio_unlock(folio);
5149 folio_put(folio);
5150 btrfs_start_ordered_extent(ordered);
5151 btrfs_put_ordered_extent(ordered);
5152 goto again;
5153 }
5154
5155 btrfs_clear_extent_bit(&inode->io_tree, block_start, block_end,
5156 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
5157 &cached_state);
5158
5159 ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
5160 &cached_state);
5161 if (ret) {
5162 btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5163 goto out_unlock;
5164 }
5165
5166 if (end == (u64)-1) {
5167 /*
5168 * We're truncating beyond EOF, the remaining blocks normally are
5169 * already holes, thus no need to zero again. But for fs block size <
5170 * page size it's possible that memory mapped writes have polluted
5171 * ranges beyond EOF.
5172 *
5173 * In that case, although such polluted blocks beyond EOF will never
5174 * reach disk, they still affect our page cache.
5175 */
5176 zero_start = max_t(u64, folio_pos(folio), start);
5177 zero_end = min_t(u64, folio_next_pos(folio) - 1, end);
5178 } else {
5179 zero_start = max_t(u64, block_start, start);
5180 zero_end = min_t(u64, block_end, end);
5181 }
5182 folio_zero_range(folio, zero_start - folio_pos(folio),
5183 zero_end - zero_start + 1);
5184
5185 btrfs_folio_clear_checked(fs_info, folio, block_start,
5186 block_end + 1 - block_start);
5187 btrfs_folio_set_dirty(fs_info, folio, block_start,
5188 block_end + 1 - block_start);
5189
5190 if (only_release_metadata)
5191 btrfs_set_extent_bit(&inode->io_tree, block_start, block_end,
5192 EXTENT_NORESERVE, &cached_state);
5193
5194 btrfs_unlock_extent(io_tree, block_start, block_end, &cached_state);
5195
5196 out_unlock:
5197 if (ret) {
5198 if (only_release_metadata)
5199 btrfs_delalloc_release_metadata(inode, blocksize, true);
5200 else
5201 btrfs_delalloc_release_space(inode, data_reserved,
5202 block_start, blocksize, true);
5203 }
5204 btrfs_delalloc_release_extents(inode, blocksize);
5205 folio_unlock(folio);
5206 folio_put(folio);
5207 out:
5208 if (only_release_metadata)
5209 btrfs_check_nocow_unlock(inode);
5210 extent_changeset_free(data_reserved);
5211 return ret;
5212 }
5213
5214 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
5215 {
5216 struct btrfs_root *root = inode->root;
5217 struct btrfs_fs_info *fs_info = root->fs_info;
5218 struct btrfs_trans_handle *trans;
5219 struct btrfs_drop_extents_args drop_args = { 0 };
5220 int ret;
5221
5222 /*
5223 * If NO_HOLES is enabled, we don't need to do anything.
5224 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
5225 * or btrfs_update_inode() will be called, which guarantees that the next
5226 * fsync will know this inode was changed and needs to be logged.
5227 */
5228 if (btrfs_fs_incompat(fs_info, NO_HOLES))
5229 return 0;
5230
5231 /*
5232 * 1 - for the one we're dropping
5233 * 1 - for the one we're adding
5234 * 1 - for updating the inode.
5235 */
5236 trans = btrfs_start_transaction(root, 3);
5237 if (IS_ERR(trans))
5238 return PTR_ERR(trans);
5239
5240 drop_args.start = offset;
5241 drop_args.end = offset + len;
5242 drop_args.drop_cache = true;
5243
5244 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
5245 if (unlikely(ret)) {
5246 btrfs_abort_transaction(trans, ret);
5247 btrfs_end_transaction(trans);
5248 return ret;
5249 }
5250
5251 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
5252 if (ret) {
5253 btrfs_abort_transaction(trans, ret);
5254 } else {
5255 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
5256 btrfs_update_inode(trans, inode);
5257 }
5258 btrfs_end_transaction(trans);
5259 return ret;
5260 }
5261
5262 /*
5263 * This function puts in dummy file extents for the area we're creating a hole
5264 * for. So if we are truncating this file to a larger size we need to insert
5265 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
5266 * the range between oldsize and size.
5267 */
5268 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
5269 {
5270 struct btrfs_root *root = inode->root;
5271 struct btrfs_fs_info *fs_info = root->fs_info;
5272 struct extent_io_tree *io_tree = &inode->io_tree;
5273 struct extent_map *em = NULL;
5274 struct extent_state *cached_state = NULL;
5275 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
5276 u64 block_end = ALIGN(size, fs_info->sectorsize);
5277 u64 last_byte;
5278 u64 cur_offset;
5279 u64 hole_size;
5280 int ret = 0;
5281
5282 /*
5283 * If our size started in the middle of a block we need to zero out the
5284 * rest of the block before we expand the i_size, otherwise we could
5285 * expose stale data.
5286 */
5287 ret = btrfs_truncate_block(inode, oldsize, oldsize, -1);
5288 if (ret)
5289 return ret;
5290
5291 if (size <= hole_start)
5292 return 0;
5293
5294 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
5295 &cached_state);
5296 cur_offset = hole_start;
5297 while (1) {
5298 em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
5299 if (IS_ERR(em)) {
5300 ret = PTR_ERR(em);
5301 em = NULL;
5302 break;
5303 }
5304 last_byte = min(btrfs_extent_map_end(em), block_end);
5305 last_byte = ALIGN(last_byte, fs_info->sectorsize);
5306 hole_size = last_byte - cur_offset;
5307
5308 if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
5309 struct extent_map *hole_em;
5310
5311 ret = maybe_insert_hole(inode, cur_offset, hole_size);
5312 if (ret)
5313 break;
5314
5315 ret = btrfs_inode_set_file_extent_range(inode,
5316 cur_offset, hole_size);
5317 if (ret)
5318 break;
5319
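/*
 * Cache the hole in an extent map. If the allocation fails we can
 * still make progress: drop any stale mapping for the range and force
 * a full fsync, so the hole is found through the subvolume tree.
 */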
5320 hole_em = btrfs_alloc_extent_map();
5321 if (!hole_em) {
5322 btrfs_drop_extent_map_range(inode, cur_offset,
5323 cur_offset + hole_size - 1,
5324 false);
5325 btrfs_set_inode_full_sync(inode);
5326 goto next;
5327 }
5328 hole_em->start = cur_offset;
5329 hole_em->len = hole_size;
5330
5331 hole_em->disk_bytenr = EXTENT_MAP_HOLE;
5332 hole_em->disk_num_bytes = 0;
5333 hole_em->ram_bytes = hole_size;
5334 hole_em->generation = btrfs_get_fs_generation(fs_info);
5335
5336 ret = btrfs_replace_extent_map_range(inode, hole_em, true);
5337 btrfs_free_extent_map(hole_em);
5338 } else {
5339 ret = btrfs_inode_set_file_extent_range(inode,
5340 cur_offset, hole_size);
5341 if (ret)
5342 break;
5343 }
5344 next:
5345 btrfs_free_extent_map(em);
5346 em = NULL;
5347 cur_offset = last_byte;
5348 if (cur_offset >= block_end)
5349 break;
5350 }
5351 btrfs_free_extent_map(em);
5352 btrfs_unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
5353 return ret;
5354 }
5355
5356 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
5357 {
5358 struct btrfs_root *root = BTRFS_I(inode)->root;
5359 struct btrfs_trans_handle *trans;
5360 loff_t oldsize = i_size_read(inode);
5361 loff_t newsize = attr->ia_size;
5362 int mask = attr->ia_valid;
5363 int ret;
5364
5365 /*
5366 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
5367 * special case where we need to update the times despite not having
5368 * these flags set. For all other operations the VFS set these flags
5369 * explicitly if it wants a timestamp update.
5370 */
5371 if (newsize != oldsize) {
5372 inode_inc_iversion(inode);
5373 if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
5374 inode_set_mtime_to_ts(inode,
5375 inode_set_ctime_current(inode));
5376 }
5377 }
5378
5379 if (newsize > oldsize) {
5380 /*
5381 * Don't do an expanding truncate while snapshotting is ongoing.
5382 * This is to ensure the snapshot captures a fully consistent
5383 * state of this file - if the snapshot captures this expanding
5384 * truncation, it must capture all writes that happened before
5385 * this truncation.
5386 */
5387 btrfs_drew_write_lock(&root->snapshot_lock);
5388 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
5389 if (ret) {
5390 btrfs_drew_write_unlock(&root->snapshot_lock);
5391 return ret;
5392 }
5393
5394 trans = btrfs_start_transaction(root, 1);
5395 if (IS_ERR(trans)) {
5396 btrfs_drew_write_unlock(&root->snapshot_lock);
5397 return PTR_ERR(trans);
5398 }
5399
5400 i_size_write(inode, newsize);
5401 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
5402 pagecache_isize_extended(inode, oldsize, newsize);
5403 ret = btrfs_update_inode(trans, BTRFS_I(inode));
5404 btrfs_drew_write_unlock(&root->snapshot_lock);
5405 btrfs_end_transaction(trans);
5406 } else {
5407 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
5408
5409 if (btrfs_is_zoned(fs_info)) {
5410 ret = btrfs_wait_ordered_range(BTRFS_I(inode),
5411 ALIGN(newsize, fs_info->sectorsize),
5412 (u64)-1);
5413 if (ret)
5414 return ret;
5415 }
5416
5417 /*
5418 * We're truncating a file that used to have good data down to
5419 * zero. Make sure any new writes to the file get on disk
5420 * on close.
5421 */
5422 if (newsize == 0)
5423 set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
5424 &BTRFS_I(inode)->runtime_flags);
5425
5426 truncate_setsize(inode, newsize);
5427
5428 inode_dio_wait(inode);
5429
5430 ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
5431 if (ret && inode->i_nlink) {
5432 int ret2;
5433
5434 /*
5435 * Truncate failed, so fix up the in-memory size. We
5436 * adjusted disk_i_size down as we removed extents, so
5437 * wait for disk_i_size to be stable and then update the
5438 * in-memory size to match.
5439 */
5440 ret2 = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
5441 if (ret2)
5442 return ret2;
5443 i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5444 }
5445 }
5446
5447 return ret;
5448 }
5449
5450 static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
5451 struct iattr *attr)
5452 {
5453 struct inode *inode = d_inode(dentry);
5454 struct btrfs_root *root = BTRFS_I(inode)->root;
5455 int ret;
5456
5457 if (btrfs_root_readonly(root))
5458 return -EROFS;
5459
5460 ret = setattr_prepare(idmap, dentry, attr);
5461 if (ret)
5462 return ret;
5463
5464 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5465 ret = btrfs_setsize(inode, attr);
5466 if (ret)
5467 return ret;
5468 }
5469
5470 if (attr->ia_valid) {
5471 setattr_copy(idmap, inode, attr);
5472 inode_inc_iversion(inode);
5473 ret = btrfs_dirty_inode(BTRFS_I(inode));
5474
5475 if (!ret && attr->ia_valid & ATTR_MODE)
5476 ret = posix_acl_chmod(idmap, dentry, inode->i_mode);
5477 }
5478
5479 return ret;
5480 }
5481
5482 /*
5483 * While truncating the inode pages during eviction, we get the VFS
5484 * calling btrfs_invalidate_folio() against each folio of the inode. This
5485 * is slow because the calls to btrfs_invalidate_folio() result in a
5486 * huge amount of calls to lock_extent() and clear_extent_bit(),
5487 * which keep merging and splitting extent_state structures over and over,
5488 * wasting lots of time.
5489 *
5490 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
5491 * skip all those expensive operations on a per folio basis and do only
5492 * the ordered io finishing, while we release here the extent_map and
5493 * extent_state structures, without the excessive merging and splitting.
5494 */
5495 static void evict_inode_truncate_pages(struct inode *inode)
5496 {
5497 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5498 struct rb_node *node;
5499
5500 ASSERT(inode_state_read_once(inode) & I_FREEING);
5501 truncate_inode_pages_final(&inode->i_data);
5502
5503 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
5504
5505 /*
5506 * Keep looping until we have no more ranges in the io tree.
5507 * We can have ongoing bios started by readahead that have
5508 * their endio callback (extent_io.c:end_bio_extent_readpage)
5509 * still in progress (unlocked the pages in the bio but did not yet
5510 * unlock the ranges in the io tree). Therefore this means some
5511 * ranges can still be locked and eviction started because before
5512 * submitting those bios, which are executed by a separate task (work
5513 * queue kthread), inode references (inode->i_count) were not taken
5514 * (which would be dropped in the end io callback of each bio).
5515 * Therefore here we effectively end up waiting for those bios and
5516 * anyone else holding locked ranges without having bumped the inode's
5517 * reference count - if we don't do it, when they access the inode's
5518 * io_tree to unlock a range it may be too late, leading to a
5519 * use-after-free issue.
5520 */
5521 spin_lock(&io_tree->lock);
5522 while (!RB_EMPTY_ROOT(&io_tree->state)) {
5523 struct extent_state *state;
5524 struct extent_state *cached_state = NULL;
5525 u64 start;
5526 u64 end;
5527 unsigned state_flags;
5528
5529 node = rb_first(&io_tree->state);
5530 state = rb_entry(node, struct extent_state, rb_node);
5531 start = state->start;
5532 end = state->end;
5533 state_flags = state->state;
5534 spin_unlock(&io_tree->lock);
5535
5536 btrfs_lock_extent(io_tree, start, end, &cached_state);
5537
5538 /*
5539 * If still has DELALLOC flag, the extent didn't reach disk,
5540 * and its reserved space won't be freed by delayed_ref.
5541 * So we need to free its reserved space here.
5542 * (Refer to comment in btrfs_invalidate_folio, case 2)
5543 *
5544 * Note, end is the offset of the last byte, so we need + 1 here.
5545 */
5546 if (state_flags & EXTENT_DELALLOC)
5547 btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
5548 end - start + 1, NULL);
5549
5550 btrfs_clear_extent_bit(io_tree, start, end,
5551 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
5552 &cached_state);
5553
5554 cond_resched();
5555 spin_lock(&io_tree->lock);
5556 }
5557 spin_unlock(&io_tree->lock);
5558 }
5559
5560 static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
5561 struct btrfs_block_rsv *rsv)
5562 {
5563 struct btrfs_fs_info *fs_info = root->fs_info;
5564 struct btrfs_trans_handle *trans;
5565 u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
5566 int ret;
5567
5568 /*
5569 * Eviction should be taking place at some place safe because of our
5570 * delayed iputs. However the normal flushing code will run delayed
5571 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
5572 *
5573 * We reserve the delayed_refs_extra here again because we can't use
5574 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
5575 * above. We reserve our extra bit here because we generate a ton of
5576 * delayed refs activity by truncating.
5577 *
5578 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
5579 * if we fail to make this reservation we can re-try without the
5580 * delayed_refs_extra so we can make some forward progress.
5581 */
5582 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
5583 BTRFS_RESERVE_FLUSH_EVICT);
5584 if (ret) {
5585 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
5586 BTRFS_RESERVE_FLUSH_EVICT);
5587 if (ret) {
5588 btrfs_warn(fs_info,
5589 "could not allocate space for delete; will truncate on mount");
5590 return ERR_PTR(-ENOSPC);
5591 }
5592 delayed_refs_extra = 0;
5593 }
5594
5595 trans = btrfs_join_transaction(root);
5596 if (IS_ERR(trans))
5597 return trans;
5598
5599 if (delayed_refs_extra) {
5600 trans->block_rsv = &fs_info->trans_block_rsv;
5601 trans->bytes_reserved = delayed_refs_extra;
5602 btrfs_block_rsv_migrate(rsv, trans->block_rsv,
5603 delayed_refs_extra, true);
5604 }
5605 return trans;
5606 }
5607
5608 void btrfs_evict_inode(struct inode *inode)
5609 {
5610 struct btrfs_fs_info *fs_info;
5611 struct btrfs_trans_handle *trans;
5612 struct btrfs_root *root = BTRFS_I(inode)->root;
5613 struct btrfs_block_rsv rsv;
5614 int ret;
5615
5616 trace_btrfs_inode_evict(inode);
5617
5618 if (!root)
5619 goto clear_inode;
5620
5621 fs_info = inode_to_fs_info(inode);
5622 evict_inode_truncate_pages(inode);
5623
5624 if (inode->i_nlink &&
5625 ((btrfs_root_refs(&root->root_item) != 0 &&
5626 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
5627 btrfs_is_free_space_inode(BTRFS_I(inode))))
5628 goto out;
5629
5630 if (is_bad_inode(inode))
5631 goto out;
5632
5633 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
5634 goto out;
5635
5636 if (inode->i_nlink > 0) {
5637 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5638 btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
5639 goto out;
5640 }
5641
5642 /*
5643 * This makes sure the inode item in tree is uptodate and the space for
5644 * the inode update is released.
5645 */
5646 ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
5647 if (ret)
5648 goto out;
5649
5650 /*
5651 * This drops any pending insert or delete operations we have for this
5652 * inode. We could have a delayed dir index deletion queued up, but
5653 * we're removing the inode completely so that'll be taken care of in
5654 * the truncate.
5655 */
5656 btrfs_kill_delayed_inode_items(BTRFS_I(inode));
5657
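/*
 * Use a temporary rsv sized for a single metadata item. failfast makes
 * reservations from it fail instead of falling back to other reserves,
 * so the truncate loop below gets -ENOSPC/-EAGAIN, refills the rsv and
 * retries.
 */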
5658 btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
5659 rsv.size = btrfs_calc_metadata_size(fs_info, 1);
5660 rsv.failfast = true;
5661
5662 btrfs_i_size_write(BTRFS_I(inode), 0);
5663
5664 while (1) {
5665 struct btrfs_truncate_control control = {
5666 .inode = BTRFS_I(inode),
5667 .ino = btrfs_ino(BTRFS_I(inode)),
5668 .new_size = 0,
5669 .min_type = 0,
5670 };
5671
5672 trans = evict_refill_and_join(root, &rsv);
5673 if (IS_ERR(trans))
5674 goto out_release;
5675
5676 trans->block_rsv = &rsv;
5677
5678 ret = btrfs_truncate_inode_items(trans, root, &control);
5679 trans->block_rsv = &fs_info->trans_block_rsv;
5680 btrfs_end_transaction(trans);
5681 /*
5682 * We have not added new delayed items for our inode after we
5683 * have flushed its delayed items, so no need to throttle on
5684 * delayed items. However we have modified extent buffers.
5685 */
5686 btrfs_btree_balance_dirty_nodelay(fs_info);
5687 if (ret && ret != -ENOSPC && ret != -EAGAIN)
5688 goto out_release;
5689 else if (!ret)
5690 break;
5691 }
5692
5693 /*
5694 * Errors here aren't a big deal, it just means we leave orphan items in
5695 * the tree. They will be cleaned up on the next mount. If the inode
5696 * number gets reused, cleanup deletes the orphan item without doing
5697 * anything, and unlink reuses the existing orphan item.
5698 *
5699 * If it turns out that we are dropping too many of these, we might want
5700 * to add a mechanism for retrying these after a commit.
5701 */
5702 trans = evict_refill_and_join(root, &rsv);
5703 if (!IS_ERR(trans)) {
5704 trans->block_rsv = &rsv;
5705 btrfs_orphan_del(trans, BTRFS_I(inode));
5706 trans->block_rsv = &fs_info->trans_block_rsv;
5707 btrfs_end_transaction(trans);
5708 }
5709
5710 out_release:
5711 btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
5712 out:
5713 /*
5714 * If we didn't successfully delete, the orphan item will still be in
5715 * the tree and we'll retry on the next mount. Again, we might also want
5716 * to retry these periodically in the future.
5717 */
5718 btrfs_remove_delayed_node(BTRFS_I(inode));
5719 clear_inode:
5720 clear_inode(inode);
5721 }
5722
5723 /*
5724 * Return the key found in the dir entry in the location pointer, fill @type
5725 * with BTRFS_FT_*, and return 0.
5726 *
5727 * If no dir entries were found, returns -ENOENT.
5728 * If found a corrupted location in dir entry, returns -EUCLEAN.
5729 */
5730 static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
5731 struct btrfs_key *location, u8 *type)
5732 {
5733 struct btrfs_dir_item *di;
5734 BTRFS_PATH_AUTO_FREE(path);
5735 struct btrfs_root *root = dir->root;
5736 int ret = 0;
5737 struct fscrypt_name fname;
5738
5739 path = btrfs_alloc_path();
5740 if (!path)
5741 return -ENOMEM;
5742
5743 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
5744 if (ret < 0)
5745 return ret;
5746 /*
5747 * fscrypt_setup_filename() should never return a positive value, but
5748 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
5749 */
5750 ASSERT(ret == 0);
5751
5752 /* This needs to handle no-key deletions later on */
5753
5754 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
5755 &fname.disk_name, 0);
5756 if (IS_ERR_OR_NULL(di)) {
5757 ret = di ? PTR_ERR(di) : -ENOENT;
5758 goto out;
5759 }
5760
5761 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5762 if (unlikely(location->type != BTRFS_INODE_ITEM_KEY &&
5763 location->type != BTRFS_ROOT_ITEM_KEY)) {
5764 ret = -EUCLEAN;
5765 btrfs_warn(root->fs_info,
5766 "%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location " BTRFS_KEY_FMT ")",
5767 __func__, fname.disk_name.name, btrfs_ino(dir),
5768 BTRFS_KEY_FMT_VALUE(location));
5769 }
5770 if (!ret)
5771 *type = btrfs_dir_ftype(path->nodes[0], di);
5772 out:
5773 fscrypt_free_filename(&fname);
5774 return ret;
5775 }
5776
5777 /*
5778 * when we hit a tree root in a directory, the btrfs part of the inode
5779 * needs to be changed to reflect the root directory of the tree root. This
5780 * is kind of like crossing a mount point.
5781 */
5782 static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
5783 struct btrfs_inode *dir,
5784 struct dentry *dentry,
5785 struct btrfs_key *location,
5786 struct btrfs_root **sub_root)
5787 {
5788 BTRFS_PATH_AUTO_FREE(path);
5789 struct btrfs_root *new_root;
5790 struct btrfs_root_ref *ref;
5791 struct extent_buffer *leaf;
5792 struct btrfs_key key;
5793 int ret;
5794 int err = 0;
5795 struct fscrypt_name fname;
5796
5797 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
5798 if (ret)
5799 return ret;
5800
5801 path = btrfs_alloc_path();
5802 if (!path) {
5803 err = -ENOMEM;
5804 goto out;
5805 }
5806
5807 err = -ENOENT;
5808 key.objectid = btrfs_root_id(dir->root);
5809 key.type = BTRFS_ROOT_REF_KEY;
5810 key.offset = location->objectid;
5811
5812 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
5813 if (ret) {
5814 if (ret < 0)
5815 err = ret;
5816 goto out;
5817 }
5818
5819 leaf = path->nodes[0];
5820 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5821 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5822 btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
5823 goto out;
5824
5825 ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
5826 (unsigned long)(ref + 1), fname.disk_name.len);
5827 if (ret)
5828 goto out;
5829
5830 btrfs_release_path(path);
5831
5832 new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
5833 if (IS_ERR(new_root)) {
5834 err = PTR_ERR(new_root);
5835 goto out;
5836 }
5837
5838 *sub_root = new_root;
5839 location->objectid = btrfs_root_dirid(&new_root->root_item);
5840 location->type = BTRFS_INODE_ITEM_KEY;
5841 location->offset = 0;
5842 err = 0;
5843 out:
5844 fscrypt_free_filename(&fname);
5845 return err;
5846 }
5847
5848
5849
5850 static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
5851 {
5852 struct btrfs_root *root = inode->root;
5853 struct btrfs_inode *entry;
5854 bool empty = false;
5855
5856 xa_lock(&root->inodes);
5857 /*
5858 * This btrfs_inode is being freed and has already been unhashed at this
5859 * point. It's possible that another btrfs_inode has already been
5860 * allocated for the same inode and inserted itself into the root, so
5861 * don't delete it in that case.
5862 *
5863 * Note that this shouldn't need to allocate memory, so the gfp flags
5864 * don't really matter.
5865 */
5866 entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
5867 GFP_ATOMIC);
5868 if (entry == inode)
5869 empty = xa_empty(&root->inodes);
5870 xa_unlock(&root->inodes);
5871
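/*
 * If the root has been deleted (zero refs) and we just removed its
 * last inode, re-check emptiness under the lock and queue the root
 * for cleanup.
 */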
5872 if (empty && btrfs_root_refs(&root->root_item) == 0) {
5873 xa_lock(&root->inodes);
5874 empty = xa_empty(&root->inodes);
5875 xa_unlock(&root->inodes);
5876 if (empty)
5877 btrfs_add_dead_root(root);
5878 }
5879 }
5880
5881
5882 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5883 {
5884 struct btrfs_iget_args *args = p;
5885
5886 btrfs_set_inode_number(BTRFS_I(inode), args->ino);
5887 BTRFS_I(inode)->root = btrfs_grab_root(args->root);
5888
5889 if (args->root && args->root == args->root->fs_info->tree_root &&
5890 args->ino != BTRFS_BTREE_INODE_OBJECTID)
5891 set_bit(BTRFS_INODE_FREE_SPACE_INODE,
5892 &BTRFS_I(inode)->runtime_flags);
5893 return 0;
5894 }
5895
5896 static int btrfs_find_actor(struct inode *inode, void *opaque)
5897 {
5898 struct btrfs_iget_args *args = opaque;
5899
5900 return args->ino == btrfs_ino(BTRFS_I(inode)) &&
5901 args->root == BTRFS_I(inode)->root;
5902 }
5903
5904 static struct btrfs_inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
5905 {
5906 struct inode *inode;
5907 struct btrfs_iget_args args;
5908 unsigned long hashval = btrfs_inode_hash(ino, root);
5909
5910 args.ino = ino;
5911 args.root = root;
5912
5913 inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
5914 btrfs_init_locked_inode,
5915 (void *)&args);
5916 if (!inode)
5917 return NULL;
5918 return BTRFS_I(inode);
5919 }
5920
5921 /*
5922 * Get an inode object given its inode number and corresponding root. Path is
5923 * preallocated to prevent recursing back to iget through allocator.
5924 */
5925 struct btrfs_inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
5926 struct btrfs_path *path)
5927 {
5928 struct btrfs_inode *inode;
5929 int ret;
5930
5931 inode = btrfs_iget_locked(ino, root);
5932 if (!inode)
5933 return ERR_PTR(-ENOMEM);
5934
5935 if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
5936 return inode;
5937
5938 ret = btrfs_read_locked_inode(inode, path);
5939 if (ret)
5940 return ERR_PTR(ret);
5941
5942 unlock_new_inode(&inode->vfs_inode);
5943 return inode;
5944 }
5945
5946 /*
5947 * Get an inode object given its inode number and corresponding root.
5948 */
5949 struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root)
5950 {
5951 struct btrfs_inode *inode;
5952 struct btrfs_path *path;
5953 int ret;
5954
5955 inode = btrfs_iget_locked(ino, root);
5956 if (!inode)
5957 return ERR_PTR(-ENOMEM);
5958
5959 if (!(inode_state_read_once(&inode->vfs_inode) & I_NEW))
5960 return inode;
5961
5962 path = btrfs_alloc_path();
5963 if (!path) {
5964 iget_failed(&inode->vfs_inode);
5965 return ERR_PTR(-ENOMEM);
5966 }
5967
5968 ret = btrfs_read_locked_inode(inode, path);
5969 btrfs_free_path(path);
5970 if (ret)
5971 return ERR_PTR(ret);
5972
5973 if (S_ISDIR(inode->vfs_inode.i_mode))
5974 inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC;
5975 unlock_new_inode(&inode->vfs_inode);
5976 return inode;
5977 }
5978
5979 static struct btrfs_inode *new_simple_dir(struct inode *dir,
5980 struct btrfs_key *key,
5981 struct btrfs_root *root)
5982 {
5983 struct timespec64 ts;
5984 struct inode *vfs_inode;
5985 struct btrfs_inode *inode;
5986
5987 vfs_inode = new_inode(dir->i_sb);
5988 if (!vfs_inode)
5989 return ERR_PTR(-ENOMEM);
5990
5991 inode = BTRFS_I(vfs_inode);
5992 inode->root = btrfs_grab_root(root);
5993 inode->ref_root_id = key->objectid;
5994 set_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags);
5995 set_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags);
5996
5997 btrfs_set_inode_number(inode, BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
5998 /*
5999 * We only need lookup, the rest is read-only and there's no inode
6000 * associated with the dentry
6001 */
6002 vfs_inode->i_op = &simple_dir_inode_operations;
6003 vfs_inode->i_opflags &= ~IOP_XATTR;
6004 vfs_inode->i_fop = &simple_dir_operations;
6005 vfs_inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
6006
6007 ts = inode_set_ctime_current(vfs_inode);
6008 inode_set_mtime_to_ts(vfs_inode, ts);
6009 inode_set_atime_to_ts(vfs_inode, inode_get_atime(dir));
6010 inode->i_otime_sec = ts.tv_sec;
6011 inode->i_otime_nsec = ts.tv_nsec;
6012
6013 vfs_inode->i_uid = dir->i_uid;
6014 vfs_inode->i_gid = dir->i_gid;
6015
6016 return inode;
6017 }
6018
6019 static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
6020 static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
6021 static_assert(BTRFS_FT_DIR == FT_DIR);
6022 static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
6023 static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
6024 static_assert(BTRFS_FT_FIFO == FT_FIFO);
6025 static_assert(BTRFS_FT_SOCK == FT_SOCK);
6026 static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);
6027
6028 static inline u8 btrfs_inode_type(const struct btrfs_inode *inode)
6029 {
6030 return fs_umode_to_ftype(inode->vfs_inode.i_mode);
6031 }
6032
6033 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
6034 {
6035 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6036 struct btrfs_inode *inode;
6037 struct btrfs_root *root = BTRFS_I(dir)->root;
6038 struct btrfs_root *sub_root = root;
6039 struct btrfs_key location = { 0 };
6040 u8 di_type = 0;
6041 int ret = 0;
6042
6043 if (dentry->d_name.len > BTRFS_NAME_LEN)
6044 return ERR_PTR(-ENAMETOOLONG);
6045
6046 ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
6047 if (ret < 0)
6048 return ERR_PTR(ret);
6049
6050 if (location.type == BTRFS_INODE_ITEM_KEY) {
6051 inode = btrfs_iget(location.objectid, root);
6052 if (IS_ERR(inode))
6053 return ERR_CAST(inode);
6054
6055 /* Do extra check against inode mode with di_type */
6056 if (unlikely(btrfs_inode_type(inode) != di_type)) {
6057 btrfs_crit(fs_info,
6058 "inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
6059 inode->vfs_inode.i_mode, btrfs_inode_type(inode),
6060 di_type);
6061 iput(&inode->vfs_inode);
6062 return ERR_PTR(-EUCLEAN);
6063 }
6064 return &inode->vfs_inode;
6065 }
6066
6067 ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
6068 &location, &sub_root);
6069 if (ret < 0) {
6070 if (ret != -ENOENT)
6071 inode = ERR_PTR(ret);
6072 else
6073 inode = new_simple_dir(dir, &location, root);
6074 } else {
6075 inode = btrfs_iget(location.objectid, sub_root);
6076 btrfs_put_root(sub_root);
6077
6078 if (IS_ERR(inode))
6079 return ERR_CAST(inode);
6080
6081 down_read(&fs_info->cleanup_work_sem);
6082 if (!sb_rdonly(inode->vfs_inode.i_sb))
6083 ret = btrfs_orphan_cleanup(sub_root);
6084 up_read(&fs_info->cleanup_work_sem);
6085 if (ret) {
6086 iput(&inode->vfs_inode);
6087 inode = ERR_PTR(ret);
6088 }
6089 }
6090
6091 if (IS_ERR(inode))
6092 return ERR_CAST(inode);
6093
6094 return &inode->vfs_inode;
6095 }
6096
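/*
 * Tell the VFS to drop, instead of caching, dentries that belong to a
 * deleted root or to the empty placeholder directory of a subvolume.
 */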
6097 static int btrfs_dentry_delete(const struct dentry *dentry)
6098 {
6099 struct btrfs_root *root;
6100 struct inode *inode = d_inode(dentry);
6101
6102 if (!inode && !IS_ROOT(dentry))
6103 inode = d_inode(dentry->d_parent);
6104
6105 if (inode) {
6106 root = BTRFS_I(inode)->root;
6107 if (btrfs_root_refs(&root->root_item) == 0)
6108 return 1;
6109
6110 if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6111 return 1;
6112 }
6113 return 0;
6114 }
6115
6116 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
6117 unsigned int flags)
6118 {
6119 struct inode *inode = btrfs_lookup_dentry(dir, dentry);
6120
6121 if (inode == ERR_PTR(-ENOENT))
6122 inode = NULL;
6123 return d_splice_alias(inode, dentry);
6124 }
6125
6126 /*
6127 * Find the highest existing sequence number in a directory and then set the
6128 * in-memory index_cnt variable to the first free sequence number.
6129 */
6130 static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
6131 {
6132 struct btrfs_root *root = inode->root;
6133 struct btrfs_key key, found_key;
6134 BTRFS_PATH_AUTO_FREE(path);
6135 struct extent_buffer *leaf;
6136 int ret;
6137
6138 key.objectid = btrfs_ino(inode);
6139 key.type = BTRFS_DIR_INDEX_KEY;
6140 key.offset = (u64)-1;
6141
6142 path = btrfs_alloc_path();
6143 if (!path)
6144 return -ENOMEM;
6145
6146 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6147 if (ret < 0)
6148 return ret;
6149
6150 if (unlikely(ret == 0)) {
6151 /*
6152 * Key with offset -1 found, there would have to exist a dir
6153 * index item with such offset, but this is out of the valid
6154 * range.
6155 */
6156 btrfs_err(root->fs_info,
6157 "unexpected exact match for DIR_INDEX key, inode %llu",
6158 btrfs_ino(inode));
6159 return -EUCLEAN;
6160 }
6161
6162 if (path->slots[0] == 0) {
6163 inode->index_cnt = BTRFS_DIR_START_INDEX;
6164 return 0;
6165 }
6166
6167 path->slots[0]--;
6168
6169 leaf = path->nodes[0];
6170 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6171
6172 if (found_key.objectid != btrfs_ino(inode) ||
6173 found_key.type != BTRFS_DIR_INDEX_KEY) {
6174 inode->index_cnt = BTRFS_DIR_START_INDEX;
6175 return 0;
6176 }
6177
6178 inode->index_cnt = found_key.offset + 1;
6179
6180 return 0;
6181 }
6182
6183 static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
6184 {
6185 int ret = 0;
6186
6187 btrfs_inode_lock(dir, 0);
6188 if (dir->index_cnt == (u64)-1) {
6189 ret = btrfs_inode_delayed_dir_index_count(dir);
6190 if (ret) {
6191 ret = btrfs_set_inode_index_count(dir);
6192 if (ret)
6193 goto out;
6194 }
6195 }
6196
6197 /* index_cnt is the index number of next new entry, so decrement it. */
6198 *index = dir->index_cnt - 1;
6199 out:
6200 btrfs_inode_unlock(dir, 0);
6201
6202 return ret;
6203 }
6204
6205 /*
6206 * All this infrastructure exists because dir_emit can fault, and we are holding
6207 * the tree lock when doing readdir. For now just allocate a buffer and copy
6208 * our information into that, and then dir_emit from the buffer. This is
6209 * similar to what NFS does, only we don't keep the buffer around in pagecache
6210 * because I'm afraid I'll mess that up. Long term we need to make filldir do
6211 * copy_to_user_inatomic so we don't have to worry about page faulting under the
6212 * tree lock.
6213 */
6214 static int btrfs_opendir(struct inode *inode, struct file *file)
6215 {
6216 struct btrfs_file_private *private;
6217 u64 last_index;
6218 int ret;
6219
6220 ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
6221 if (ret)
6222 return ret;
6223
6224 private = kzalloc_obj(struct btrfs_file_private);
6225 if (!private)
6226 return -ENOMEM;
6227 private->last_index = last_index;
6228 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
6229 if (!private->filldir_buf) {
6230 kfree(private);
6231 return -ENOMEM;
6232 }
6233 file->private_data = private;
6234 return 0;
6235 }
6236
6237 static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
6238 {
6239 struct btrfs_file_private *private = file->private_data;
6240 int ret;
6241
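/*
 * Refresh last_index on every seek, so a following readdir can return
 * entries created after the directory was opened.
 */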
6242 ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
6243 &private->last_index);
6244 if (ret)
6245 return ret;
6246
6247 return generic_file_llseek(file, offset, whence);
6248 }
6249
6250 struct dir_entry {
6251 u64 ino;
6252 u64 offset;
6253 unsigned type;
6254 int name_len;
6255 };
6256
6257 static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
6258 {
6259 while (entries--) {
6260 struct dir_entry *entry = addr;
6261 char *name = (char *)(entry + 1);
6262
6263 ctx->pos = get_unaligned(&entry->offset);
6264 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
6265 get_unaligned(&entry->ino),
6266 get_unaligned(&entry->type)))
6267 return 1;
6268 addr += sizeof(struct dir_entry) +
6269 get_unaligned(&entry->name_len);
6270 ctx->pos++;
6271 }
6272 return 0;
6273 }
6274
6275 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
6276 {
6277 struct inode *inode = file_inode(file);
6278 struct btrfs_root *root = BTRFS_I(inode)->root;
6279 struct btrfs_file_private *private = file->private_data;
6280 struct btrfs_dir_item *di;
6281 struct btrfs_key key;
6282 struct btrfs_key found_key;
6283 BTRFS_PATH_AUTO_FREE(path);
6284 void *addr;
6285 LIST_HEAD(ins_list);
6286 LIST_HEAD(del_list);
6287 int ret;
6288 char *name_ptr;
6289 int name_len;
6290 int entries = 0;
6291 int total_len = 0;
6292 bool put = false;
6293 struct btrfs_key location;
6294
6295 if (!dir_emit_dots(file, ctx))
6296 return 0;
6297
6298 path = btrfs_alloc_path();
6299 if (!path)
6300 return -ENOMEM;
6301
6302 addr = private->filldir_buf;
6303 path->reada = READA_FORWARD;
6304
6305 put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
6306 &ins_list, &del_list);
6307
6308 again:
6309 key.type = BTRFS_DIR_INDEX_KEY;
6310 key.offset = ctx->pos;
6311 key.objectid = btrfs_ino(BTRFS_I(inode));
6312
6313 btrfs_for_each_slot(root, &key, &found_key, path, ret) {
6314 struct dir_entry *entry;
6315 struct extent_buffer *leaf = path->nodes[0];
6316 u8 ftype;
6317
6318 if (found_key.objectid != key.objectid)
6319 break;
6320 if (found_key.type != BTRFS_DIR_INDEX_KEY)
6321 break;
6322 if (found_key.offset < ctx->pos)
6323 continue;
6324 if (found_key.offset > private->last_index)
6325 break;
6326 if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
6327 continue;
6328 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
6329 name_len = btrfs_dir_name_len(leaf, di);
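/*
 * The filldir buffer holds one page. Once the next entry would not
 * fit, release the path (dropping the tree locks) and flush the
 * buffer to userspace via dir_emit(), which may fault, then restart
 * the search at the current position.
 */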
6330 if ((total_len + sizeof(struct dir_entry) + name_len) >=
6331 PAGE_SIZE) {
6332 btrfs_release_path(path);
6333 ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6334 if (ret)
6335 goto nopos;
6336 addr = private->filldir_buf;
6337 entries = 0;
6338 total_len = 0;
6339 goto again;
6340 }
6341
6342 ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
6343 entry = addr;
6344 name_ptr = (char *)(entry + 1);
6345 read_extent_buffer(leaf, name_ptr,
6346 (unsigned long)(di + 1), name_len);
6347 put_unaligned(name_len, &entry->name_len);
6348 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
6349 btrfs_dir_item_key_to_cpu(leaf, di, &location);
6350 put_unaligned(location.objectid, &entry->ino);
6351 put_unaligned(found_key.offset, &entry->offset);
6352 entries++;
6353 addr += sizeof(struct dir_entry) + name_len;
6354 total_len += sizeof(struct dir_entry) + name_len;
6355 }
6356 /* Catch error encountered during iteration */
6357 if (ret < 0)
6358 goto err;
6359
6360 btrfs_release_path(path);
6361
6362 ret = btrfs_filldir(private->filldir_buf, entries, ctx);
6363 if (ret)
6364 goto nopos;
6365
6366 if (btrfs_readdir_delayed_dir_index(ctx, &ins_list))
6367 goto nopos;
6368
6369 /*
6370 * Stop new entries from being returned after we return the last
6371 * entry.
6372 *
6373 * New directory entries are assigned a strictly increasing
6374 * offset. This means that new entries created during readdir
6375 * are *guaranteed* to be seen in the future by that readdir.
6376 * This has broken buggy programs which operate on names as
6377 * they're returned by readdir. Until we reuse freed offsets
6378 * we have this hack to stop new entries from being returned
6379 * under the assumption that they'll never reach this huge
6380 * offset.
6381 *
6382 * This is being careful not to overflow 32bit loff_t unless the
6383 * last entry requires it because doing so has broken 32bit apps
6384 * in the past.
6385 */
6386 if (ctx->pos >= INT_MAX)
6387 ctx->pos = LLONG_MAX;
6388 else
6389 ctx->pos = INT_MAX;
6390 nopos:
6391 ret = 0;
6392 err:
6393 if (put)
6394 btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
6395 return ret;
6396 }
6397
6398 /*
6399 * This is somewhat expensive, updating the tree every time the
6400 * inode changes. But, it is most likely to find the inode in cache.
6401 * FIXME, needs more benchmarking...there are no reasons other than performance
6402 * to keep or drop this code.
6403 */
6404 static int btrfs_dirty_inode(struct btrfs_inode *inode)
6405 {
6406 struct btrfs_root *root = inode->root;
6407 struct btrfs_fs_info *fs_info = root->fs_info;
6408 struct btrfs_trans_handle *trans;
6409 int ret;
6410
6411 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
6412 return 0;
6413
6414 trans = btrfs_join_transaction(root);
6415 if (IS_ERR(trans))
6416 return PTR_ERR(trans);
6417
6418 ret = btrfs_update_inode(trans, inode);
6419 if (ret == -ENOSPC || ret == -EDQUOT) {
6420 /* whoops, lets try again with the full transaction */
6421 btrfs_end_transaction(trans);
6422 trans = btrfs_start_transaction(root, 1);
6423 if (IS_ERR(trans))
6424 return PTR_ERR(trans);
6425
6426 ret = btrfs_update_inode(trans, inode);
6427 }
6428 btrfs_end_transaction(trans);
6429 if (inode->delayed_node)
6430 btrfs_balance_delayed_items(fs_info);
6431
6432 return ret;
6433 }
6434
6435 /*
6436 * We need our own ->update_time so that we can return error on ENOSPC for
6437 * updating the inode in the case of file write and mmap writes.
6438 */
6439 static int btrfs_update_time(struct inode *inode, enum fs_update_time type,
6440 unsigned int flags)
6441 {
6442 struct btrfs_root *root = BTRFS_I(inode)->root;
6443 int dirty;
6444
6445 if (btrfs_root_readonly(root))
6446 return -EROFS;
6447 if (flags & IOCB_NOWAIT)
6448 return -EAGAIN;
6449
6450 dirty = inode_update_time(inode, type, flags);
6451 if (dirty <= 0)
6452 return dirty;
6453 return btrfs_dirty_inode(BTRFS_I(inode));
6454 }
6455
6456 /*
6457 * Helper to find a free sequence number in a given directory. The current
6458 * code is very simple; later versions will do smarter things in the btree.
6459 */
6460 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
6461 {
6462 int ret = 0;
6463
6464 if (dir->index_cnt == (u64)-1) {
6465 ret = btrfs_inode_delayed_dir_index_count(dir);
6466 if (ret) {
6467 ret = btrfs_set_inode_index_count(dir);
6468 if (ret)
6469 return ret;
6470 }
6471 }
6472
6473 *index = dir->index_cnt;
6474 dir->index_cnt++;
6475
6476 return ret;
6477 }
6478
6479 static int btrfs_insert_inode_locked(struct inode *inode)
6480 {
6481 struct btrfs_iget_args args;
6482
6483 args.ino = btrfs_ino(BTRFS_I(inode));
6484 args.root = BTRFS_I(inode)->root;
6485
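/*
 * Hash by (ino, root) exactly as btrfs_iget_locked() does, so a
 * concurrent lookup finds this in-construction inode and waits on
 * I_NEW instead of creating a duplicate.
 */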
6486 return insert_inode_locked4(inode,
6487 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6488 btrfs_find_actor, &args);
6489 }
6490
6491 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
6492 unsigned int *trans_num_items)
6493 {
6494 struct inode *dir = args->dir;
6495 struct inode *inode = args->inode;
6496 int ret;
6497
6498 if (!args->orphan) {
6499 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
6500 &args->fname);
6501 if (ret)
6502 return ret;
6503 }
6504
6505 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
6506 if (ret) {
6507 fscrypt_free_filename(&args->fname);
6508 return ret;
6509 }
6510
6511 /* 1 to add inode item */
6512 *trans_num_items = 1;
6513 /* 1 to add compression property */
6514 if (BTRFS_I(dir)->prop_compress)
6515 (*trans_num_items)++;
6516 /* 1 to add default ACL xattr */
6517 if (args->default_acl)
6518 (*trans_num_items)++;
6519 /* 1 to add access ACL xattr */
6520 if (args->acl)
6521 (*trans_num_items)++;
6522 #ifdef CONFIG_SECURITY
6523 /* 1 to add LSM xattr */
6524 if (dir->i_security)
6525 (*trans_num_items)++;
6526 #endif
6527 if (args->orphan) {
6528 /* 1 to add orphan item */
6529 (*trans_num_items)++;
6530 } else {
6531 /*
6532 * 1 to add dir item
6533 * 1 to add dir index
6534 * 1 to update parent inode item
6535 *
6536 * No need for 1 unit for the inode ref item because it is
6537 * inserted in a batch together with the inode item at
6538 * btrfs_create_new_inode().
6539 */
6540 *trans_num_items += 3;
6541 }
6542 return 0;
6543 }
6544
6545 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
6546 {
6547 posix_acl_release(args->acl);
6548 posix_acl_release(args->default_acl);
6549 fscrypt_free_filename(&args->fname);
6550 }
6551
6552 /*
6553 * Inherit flags from the parent inode.
6554 *
6555 * Currently only the compression flags and the cow flags are inherited.
6556 */
6557 static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
6558 {
6559 unsigned int flags;
6560
6561 flags = dir->flags;
6562
6563 if (flags & BTRFS_INODE_NOCOMPRESS) {
6564 inode->flags &= ~BTRFS_INODE_COMPRESS;
6565 inode->flags |= BTRFS_INODE_NOCOMPRESS;
6566 } else if (flags & BTRFS_INODE_COMPRESS) {
6567 inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
6568 inode->flags |= BTRFS_INODE_COMPRESS;
6569 }
6570
6571 if (flags & BTRFS_INODE_NODATACOW) {
6572 inode->flags |= BTRFS_INODE_NODATACOW;
6573 if (S_ISREG(inode->vfs_inode.i_mode))
6574 inode->flags |= BTRFS_INODE_NODATASUM;
6575 }
6576
6577 btrfs_sync_inode_flags_to_i_flags(inode);
6578 }
6579
6580 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
6581 struct btrfs_new_inode_args *args)
6582 {
6583 struct timespec64 ts;
6584 struct inode *dir = args->dir;
6585 struct inode *inode = args->inode;
6586 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
6587 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6588 struct btrfs_root *root;
6589 struct btrfs_inode_item *inode_item;
6590 struct btrfs_path *path;
6591 u64 objectid;
6592 struct btrfs_inode_ref *ref;
6593 struct btrfs_key key[2];
6594 u32 sizes[2];
6595 struct btrfs_item_batch batch;
6596 unsigned long ptr;
6597 int ret;
6598 bool xa_reserved = false;
6599
6600 path = btrfs_alloc_path();
6601 if (!path)
6602 return -ENOMEM;
6603
6604 if (!args->subvol)
6605 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
6606 root = BTRFS_I(inode)->root;
6607
6608 ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
6609 if (ret)
6610 goto out;
6611
6612 ret = btrfs_get_free_objectid(root, &objectid);
6613 if (ret)
6614 goto out;
6615 btrfs_set_inode_number(BTRFS_I(inode), objectid);
6616
6617 ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
6618 if (ret)
6619 goto out;
6620 xa_reserved = true;
6621
6622 if (args->orphan) {
6623 /*
6624 * O_TMPFILE: set the link count to 0 so that the inode item we
6625 * fill in below records the correct link count.
6626 */
6627 set_nlink(inode, 0);
6628 } else {
6629 trace_btrfs_inode_request(dir);
6630
6631 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
6632 if (ret)
6633 goto out;
6634 }
6635
6636 if (S_ISDIR(inode->i_mode))
6637 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;
6638
6639 BTRFS_I(inode)->generation = trans->transid;
6640 inode->i_generation = BTRFS_I(inode)->generation;
6641
6642 /*
6643 * We don't have any capability xattrs set here yet, shortcut any
6644 * queries for the xattrs here. If we add them later via the inode
6645 * security init path or any other path this flag will be cleared.
6646 */
6647 set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
6648
6649 /*
6650 * Subvolumes don't inherit flags from their parent directory.
6651 * Originally this was probably an accident, but we can't
6652 * change it now without compatibility issues.
6653 */
6654 if (!args->subvol)
6655 btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));
6656
6657 btrfs_set_inode_mapping_order(BTRFS_I(inode));
6658 if (S_ISREG(inode->i_mode)) {
6659 if (btrfs_test_opt(fs_info, NODATASUM))
6660 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6661 if (btrfs_test_opt(fs_info, NODATACOW))
6662 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6663 BTRFS_INODE_NODATASUM;
6664 btrfs_update_inode_mapping_flags(BTRFS_I(inode));
6665 }
6666
6667 ret = btrfs_insert_inode_locked(inode);
6668 if (ret < 0) {
6669 if (!args->orphan)
6670 BTRFS_I(dir)->index_cnt--;
6671 goto out;
6672 }
6673
6674 /*
6675 * We could have gotten an inode number from somebody who was fsynced
6676 * and then removed in this same transaction, so let's just set full
6677 * sync since it will be a full sync anyway and this will blow away the
6678 * old info in the log.
6679 */
6680 btrfs_set_inode_full_sync(BTRFS_I(inode));
6681
6682 key[0].objectid = objectid;
6683 key[0].type = BTRFS_INODE_ITEM_KEY;
6684 key[0].offset = 0;
6685
6686 sizes[0] = sizeof(struct btrfs_inode_item);
6687
6688 if (!args->orphan) {
6689 /*
6690 * Start new inodes with an inode_ref. This is slightly more
6691 * efficient for small numbers of hard links since they will
6692 * be packed into one item. Extended refs will kick in if we
6693 * add more hard links than can fit in the ref item.
6694 */
6695 key[1].objectid = objectid;
6696 key[1].type = BTRFS_INODE_REF_KEY;
6697 if (args->subvol) {
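/*
 * For a new subvolume the inode ref names itself with "..",
 * hence the 2 extra bytes for the name in the item size.
 */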
6698 key[1].offset = objectid;
6699 sizes[1] = 2 + sizeof(*ref);
6700 } else {
6701 key[1].offset = btrfs_ino(BTRFS_I(dir));
6702 sizes[1] = name->len + sizeof(*ref);
6703 }
6704 }
6705
6706 batch.keys = &key[0];
6707 batch.data_sizes = &sizes[0];
6708 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
6709 batch.nr = args->orphan ? 1 : 2;
6710 ret = btrfs_insert_empty_items(trans, root, path, &batch);
6711 if (unlikely(ret != 0)) {
6712 btrfs_abort_transaction(trans, ret);
6713 goto discard;
6714 }
6715
6716 ts = simple_inode_init_ts(inode);
6717 BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
6718 BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
6719
6720 /*
6721 * We're going to fill the inode item now, so at this point the inode
6722 * must be fully initialized.
6723 */
6724
6725 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6726 struct btrfs_inode_item);
6727 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
6728 sizeof(*inode_item));
6729 fill_inode_item(trans, path->nodes[0], inode_item, inode);
6730
6731 if (!args->orphan) {
6732 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6733 struct btrfs_inode_ref);
6734 ptr = (unsigned long)(ref + 1);
6735 if (args->subvol) {
6736 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
6737 btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
6738 write_extent_buffer(path->nodes[0], "..", ptr, 2);
6739 } else {
6740 btrfs_set_inode_ref_name_len(path->nodes[0], ref,
6741 name->len);
6742 btrfs_set_inode_ref_index(path->nodes[0], ref,
6743 BTRFS_I(inode)->dir_index);
6744 write_extent_buffer(path->nodes[0], name->name, ptr,
6745 name->len);
6746 }
6747 }
6748
6749 /*
6750 * We don't need the path anymore. Inheriting properties, adding ACLs,
6751 * security xattrs, the orphan item or the link will each allocate yet
6752 * another path anyway, so just free ours now.
6753 */
6754 btrfs_free_path(path);
6755 path = NULL;
6756
6757 if (args->subvol) {
6758 struct btrfs_inode *parent;
6759
6760 /*
6761 * Subvolumes inherit properties from their parent subvolume,
6762 * not the directory they were created in.
6763 */
6764 parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
6765 if (IS_ERR(parent)) {
6766 ret = PTR_ERR(parent);
6767 } else {
6768 ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
6769 parent);
6770 iput(&parent->vfs_inode);
6771 }
6772 } else {
6773 ret = btrfs_inode_inherit_props(trans, BTRFS_I(inode),
6774 BTRFS_I(dir));
6775 }
6776 if (ret) {
6777 btrfs_err(fs_info,
6778 "error inheriting props for ino %llu (root %llu): %d",
6779 btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
6780 }
6781
6782 /*
6783 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
6784 * probably a bug.
6785 */
6786 if (!args->subvol) {
6787 ret = btrfs_init_inode_security(trans, args);
6788 if (unlikely(ret)) {
6789 btrfs_abort_transaction(trans, ret);
6790 goto discard;
6791 }
6792 }
6793
6794 ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
6795 if (WARN_ON(ret)) {
6796 /* Shouldn't happen, we used xa_reserve() before. */
6797 btrfs_abort_transaction(trans, ret);
6798 goto discard;
6799 }
6800
6801 trace_btrfs_inode_new(inode);
6802 btrfs_set_inode_last_trans(trans, BTRFS_I(inode));
6803
6804 btrfs_update_root_times(trans, root);
6805
6806 if (args->orphan) {
6807 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
6808 if (unlikely(ret)) {
6809 btrfs_abort_transaction(trans, ret);
6810 goto discard;
6811 }
6812 } else {
6813 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
6814 0, BTRFS_I(inode)->dir_index);
6815 if (unlikely(ret)) {
6816 btrfs_abort_transaction(trans, ret);
6817 goto discard;
6818 }
6819 }
6820
6821 return 0;
6822
6823 discard:
6824 /*
6825 * discard_new_inode() calls iput(), but the caller owns the reference
6826 * to the inode.
6827 */
6828 ihold(inode);
6829 discard_new_inode(inode);
6830 out:
6831 if (xa_reserved)
6832 xa_release(&root->inodes, objectid);
6833
6834 btrfs_free_path(path);
6835 return ret;
6836 }
6837
6838 /*
6839 * Utility function to add 'inode' into 'parent_inode' with
6840 * a given name and a given sequence number.
6841 * If 'add_backref' is true, also insert a backref from the
6842 * inode to the parent directory.
6843 */
6844 int btrfs_add_link(struct btrfs_trans_handle *trans,
6845 struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
6846 const struct fscrypt_str *name, bool add_backref, u64 index)
6847 {
6848 int ret = 0;
6849 struct btrfs_key key;
6850 struct btrfs_root *root = parent_inode->root;
6851 u64 ino = btrfs_ino(inode);
6852 u64 parent_ino = btrfs_ino(parent_inode);
6853
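/*
 * A subvolume entry doesn't point at an inode item but at the root
 * item of the subvolume's tree, so use the root key in that case.
 */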
6854 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6855 memcpy(&key, &inode->root->root_key, sizeof(key));
6856 } else {
6857 key.objectid = ino;
6858 key.type = BTRFS_INODE_ITEM_KEY;
6859 key.offset = 0;
6860 }
6861
6862 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6863 ret = btrfs_add_root_ref(trans, key.objectid,
6864 btrfs_root_id(root), parent_ino,
6865 index, name);
6866 } else if (add_backref) {
6867 ret = btrfs_insert_inode_ref(trans, root, name,
6868 ino, parent_ino, index);
6869 }
6870
6871 /* Nothing to clean up yet */
6872 if (ret)
6873 return ret;
6874
6875 ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
6876 btrfs_inode_type(inode), index);
6877 if (ret == -EEXIST || ret == -EOVERFLOW)
6878 goto fail_dir_item;
6879 else if (unlikely(ret)) {
6880 btrfs_abort_transaction(trans, ret);
6881 return ret;
6882 }
6883
6884 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
6885 name->len * 2);
6886 inode_inc_iversion(&parent_inode->vfs_inode);
6887 update_time_after_link_or_unlink(parent_inode);
6888
6889 ret = btrfs_update_inode(trans, parent_inode);
6890 if (ret)
6891 btrfs_abort_transaction(trans, ret);
6892 return ret;
6893
6894 fail_dir_item:
6895 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6896 u64 local_index;
6897 int ret2;
6898
6899 ret2 = btrfs_del_root_ref(trans, key.objectid, btrfs_root_id(root),
6900 parent_ino, &local_index, name);
6901 if (ret2)
6902 btrfs_abort_transaction(trans, ret2);
6903 } else if (add_backref) {
6904 int ret2;
6905
6906 ret2 = btrfs_del_inode_ref(trans, root, name, ino, parent_ino, NULL);
6907 if (ret2)
6908 btrfs_abort_transaction(trans, ret2);
6909 }
6910
6911 /* Return the original error code */
6912 return ret;
6913 }
6914
6915 static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6916 struct inode *inode)
6917 {
6918 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6919 struct btrfs_root *root = BTRFS_I(dir)->root;
6920 struct btrfs_new_inode_args new_inode_args = {
6921 .dir = dir,
6922 .dentry = dentry,
6923 .inode = inode,
6924 };
6925 unsigned int trans_num_items;
6926 struct btrfs_trans_handle *trans;
6927 int ret;
6928
6929 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6930 if (ret)
6931 goto out_inode;
6932
6933 trans = btrfs_start_transaction(root, trans_num_items);
6934 if (IS_ERR(trans)) {
6935 ret = PTR_ERR(trans);
6936 goto out_new_inode_args;
6937 }
6938
6939 ret = btrfs_create_new_inode(trans, &new_inode_args);
6940 if (!ret) {
6941 if (S_ISDIR(inode->i_mode))
6942 inode->i_opflags |= IOP_FASTPERM_MAY_EXEC;
6943 d_instantiate_new(dentry, inode);
6944 }
6945
6946 btrfs_end_transaction(trans);
6947 btrfs_btree_balance_dirty(fs_info);
6948 out_new_inode_args:
6949 btrfs_new_inode_args_destroy(&new_inode_args);
6950 out_inode:
6951 if (ret)
6952 iput(inode);
6953 return ret;
6954 }
6955
6956 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
6957 struct dentry *dentry, umode_t mode, dev_t rdev)
6958 {
6959 struct inode *inode;
6960
6961 inode = new_inode(dir->i_sb);
6962 if (!inode)
6963 return -ENOMEM;
6964 inode_init_owner(idmap, inode, dir, mode);
6965 inode->i_op = &btrfs_special_inode_operations;
6966 init_special_inode(inode, inode->i_mode, rdev);
6967 return btrfs_create_common(dir, dentry, inode);
6968 }
6969
6970 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
6971 struct dentry *dentry, umode_t mode, bool excl)
6972 {
6973 struct inode *inode;
6974
6975 inode = new_inode(dir->i_sb);
6976 if (!inode)
6977 return -ENOMEM;
6978 inode_init_owner(idmap, inode, dir, mode);
6979 inode->i_fop = &btrfs_file_operations;
6980 inode->i_op = &btrfs_file_inode_operations;
6981 inode->i_mapping->a_ops = &btrfs_aops;
6982 return btrfs_create_common(dir, dentry, inode);
6983 }
6984
6985 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6986 struct dentry *dentry)
6987 {
6988 struct btrfs_trans_handle *trans = NULL;
6989 struct btrfs_root *root = BTRFS_I(dir)->root;
6990 struct inode *inode = d_inode(old_dentry);
6991 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
6992 struct fscrypt_name fname;
6993 u64 index;
6994 int ret;
6995
6996 /* Do not allow hard links across different subvolumes of the same device. */
6997 if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
6998 return -EXDEV;
6999
7000 if (inode->i_nlink >= BTRFS_LINK_MAX)
7001 return -EMLINK;
7002
7003 ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
7004 if (ret)
7005 goto fail;
7006
7007 ret = btrfs_set_inode_index(BTRFS_I(dir), &index);
7008 if (ret)
7009 goto fail;
7010
7011 /*
7012 * 2 items for inode and inode ref
7013 * 2 items for dir items
7014 * 1 item for parent inode
7015 * 1 item for orphan item deletion if O_TMPFILE
7016 */
7017 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
7018 if (IS_ERR(trans)) {
7019 ret = PTR_ERR(trans);
7020 trans = NULL;
7021 goto fail;
7022 }
7023
7024 /* There are several dir indexes for this inode, clear the cache. */
7025 BTRFS_I(inode)->dir_index = 0ULL;
7026 inode_inc_iversion(inode);
7027 inode_set_ctime_current(inode);
7028
7029 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
7030 &fname.disk_name, 1, index);
7031 if (ret)
7032 goto fail;
7033
7034 /* Link added, now update the inode item with the new link count. */
7035 inc_nlink(inode);
7036 ret = btrfs_update_inode(trans, BTRFS_I(inode));
7037 if (unlikely(ret)) {
7038 btrfs_abort_transaction(trans, ret);
7039 goto fail;
7040 }
7041
7042 if (inode->i_nlink == 1) {
7043 /*
7044 * If the new hard link count is 1, it's a file created with the
7045 * open(2) O_TMPFILE flag.
7046 */
7047 ret = btrfs_orphan_del(trans, BTRFS_I(inode));
7048 if (unlikely(ret)) {
7049 btrfs_abort_transaction(trans, ret);
7050 goto fail;
7051 }
7052 }
7053
7054 /* Grab reference for the new dentry passed to d_instantiate(). */
7055 ihold(inode);
7056 d_instantiate(dentry, inode);
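/*
 * Log the new name, so that a subsequent fsync of the inode doesn't
 * need a full transaction commit to make the new link persistent.
 */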
7057 btrfs_log_new_name(trans, old_dentry, NULL, 0, dentry->d_parent);
7058
7059 fail:
7060 fscrypt_free_filename(&fname);
7061 if (trans)
7062 btrfs_end_transaction(trans);
7063 btrfs_btree_balance_dirty(fs_info);
7064 return ret;
7065 }
7066
7067 static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
7068 struct dentry *dentry, umode_t mode)
7069 {
7070 struct inode *inode;
7071
7072 inode = new_inode(dir->i_sb);
7073 if (!inode)
7074 return ERR_PTR(-ENOMEM);
7075 inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
7076 inode->i_op = &btrfs_dir_inode_operations;
7077 inode->i_fop = &btrfs_dir_file_operations;
7078 return ERR_PTR(btrfs_create_common(dir, dentry, inode));
7079 }
7080
7081 static noinline int uncompress_inline(struct btrfs_path *path,
7082 struct folio *folio,
7083 struct btrfs_file_extent_item *item)
7084 {
7085 int ret;
7086 struct extent_buffer *leaf = path->nodes[0];
7087 const u32 blocksize = leaf->fs_info->sectorsize;
7088 char *tmp;
7089 size_t max_size;
7090 unsigned long inline_size;
7091 unsigned long ptr;
7092 int compress_type;
7093
7094 compress_type = btrfs_file_extent_compression(leaf, item);
7095 max_size = btrfs_file_extent_ram_bytes(leaf, item);
7096 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
7097 tmp = kmalloc(inline_size, GFP_NOFS);
7098 if (!tmp)
7099 return -ENOMEM;
7100 ptr = btrfs_file_extent_inline_start(item);
7101
7102 read_extent_buffer(leaf, tmp, ptr, inline_size);
7103
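/*
 * Inline extents are limited to the first block of the file, so cap
 * the amount we decompress to one block.
 */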
7104 max_size = min_t(unsigned long, blocksize, max_size);
7105 ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
7106 max_size);
7107
7108 /*
7109 * The decompression code contains a memset to fill in any space between the end
7110 * of the uncompressed data and the end of max_size in case the decompressed
7111 * data ends up shorter than ram_bytes. That doesn't cover the hole between
7112 * the end of an inline extent and the beginning of the next block, so we
7113 * cover that region here.
7114 */
7115
7116 if (max_size < blocksize)
7117 folio_zero_range(folio, max_size, blocksize - max_size);
7118 kfree(tmp);
7119 return ret;
7120 }
7121
7122 static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
7123 {
7124 const u32 blocksize = path->nodes[0]->fs_info->sectorsize;
7125 struct btrfs_file_extent_item *fi;
7126 void *kaddr;
7127 size_t copy_size;
7128
7129 if (!folio || folio_test_uptodate(folio))
7130 return 0;
7131
7132 ASSERT(folio_pos(folio) == 0);
7133
7134 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
7135 struct btrfs_file_extent_item);
7136 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
7137 return uncompress_inline(path, folio, fi);
7138
7139 copy_size = min_t(u64, blocksize,
7140 btrfs_file_extent_ram_bytes(path->nodes[0], fi));
7141 kaddr = kmap_local_folio(folio, 0);
7142 read_extent_buffer(path->nodes[0], kaddr,
7143 btrfs_file_extent_inline_start(fi), copy_size);
7144 kunmap_local(kaddr);
7145 if (copy_size < blocksize)
7146 folio_zero_range(folio, copy_size, blocksize - copy_size);
7147 return 0;
7148 }
7149
7150 /*
7151 * Lookup the first extent overlapping a range in a file.
7152 *
7153 * @inode: file to search in
7154 * @folio: folio to read extent data into if the extent is inline
7155 * @start: file offset
7156 * @len: length of range starting at @start
7157 *
7158 * Return the first &struct extent_map which overlaps the given range, reading
7159 * it from the B-tree and caching it if necessary. Note that there may be more
7160 * extents which overlap the given range after the returned extent_map.
7161 *
7162 * If @folio is not NULL and the extent is inline, this also reads the extent
7163 * data directly into the folio and marks the extent up to date in the io_tree.
7164 *
7165 * Return: ERR_PTR on error, non-NULL extent_map on success.
7166 */
7167 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
7168 struct folio *folio, u64 start, u64 len)
7169 {
7170 struct btrfs_fs_info *fs_info = inode->root->fs_info;
7171 int ret = 0;
7172 u64 extent_start = 0;
7173 u64 extent_end = 0;
7174 u64 objectid = btrfs_ino(inode);
7175 int extent_type = -1;
7176 struct btrfs_path *path = NULL;
7177 struct btrfs_root *root = inode->root;
7178 struct btrfs_file_extent_item *item;
7179 struct extent_buffer *leaf;
7180 struct btrfs_key found_key;
7181 struct extent_map *em = NULL;
7182 struct extent_map_tree *em_tree = &inode->extent_tree;
7183
7184 read_lock(&em_tree->lock);
7185 em = btrfs_lookup_extent_mapping(em_tree, start, len);
7186 read_unlock(&em_tree->lock);
7187
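/*
 * Discard a cached extent map that doesn't cover @start, and also an
 * inline one when the caller passed a folio, since then the inline
 * data must be read from the b-tree into the folio again.
 */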
7188 if (em) {
7189 if (em->start > start || btrfs_extent_map_end(em) <= start)
7190 btrfs_free_extent_map(em);
7191 else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
7192 btrfs_free_extent_map(em);
7193 else
7194 goto out;
7195 }
7196 em = btrfs_alloc_extent_map();
7197 if (!em) {
7198 ret = -ENOMEM;
7199 goto out;
7200 }
7201 em->start = EXTENT_MAP_HOLE;
7202 em->disk_bytenr = EXTENT_MAP_HOLE;
7203 em->len = (u64)-1;
7204
7205 path = btrfs_alloc_path();
7206 if (!path) {
7207 ret = -ENOMEM;
7208 goto out;
7209 }
7210
7211 /* Chances are we'll be called again, so go ahead and do readahead */
7212 path->reada = READA_FORWARD;
7213
7214 /*
7215 * The same explanation in load_free_space_cache applies here as well:
7216 * we only read when we're loading the free space cache, and at that
7217 * point the commit_root has everything we need.
7218 */
7219 if (btrfs_is_free_space_inode(inode)) {
7220 path->search_commit_root = true;
7221 path->skip_locking = true;
7222 }
7223
7224 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
7225 if (ret < 0) {
7226 goto out;
7227 } else if (ret > 0) {
7228 if (path->slots[0] == 0)
7229 goto not_found;
7230 path->slots[0]--;
7231 ret = 0;
7232 }
7233
7234 leaf = path->nodes[0];
7235 item = btrfs_item_ptr(leaf, path->slots[0],
7236 struct btrfs_file_extent_item);
7237 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7238 if (found_key.objectid != objectid ||
7239 found_key.type != BTRFS_EXTENT_DATA_KEY) {
7240 /*
7241 * If we back up past the first extent we want to move forward
7242 * and see if there is an extent in front of us, otherwise we'll
7243 * say there is a hole for our whole search range which can
7244 * cause problems.
7245 */
7246 extent_end = start;
7247 goto next;
7248 }
7249
7250 extent_type = btrfs_file_extent_type(leaf, item);
7251 extent_start = found_key.offset;
7252 extent_end = btrfs_file_extent_end(path);
7253 if (extent_type == BTRFS_FILE_EXTENT_REG ||
7254 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7255 /* Only regular file could have regular/prealloc extent */
7256 if (unlikely(!S_ISREG(inode->vfs_inode.i_mode))) {
7257 ret = -EUCLEAN;
7258 btrfs_crit(fs_info,
7259 "regular/prealloc extent found for non-regular inode %llu",
7260 btrfs_ino(inode));
7261 goto out;
7262 }
7263 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
7264 extent_start);
7265 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7266 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
7267 path->slots[0],
7268 extent_start);
7269 }
7270 next:
7271 if (start >= extent_end) {
7272 path->slots[0]++;
7273 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
7274 ret = btrfs_next_leaf(root, path);
7275 if (ret < 0)
7276 goto out;
7277 else if (ret > 0)
7278 goto not_found;
7279
7280 leaf = path->nodes[0];
7281 }
7282 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7283 if (found_key.objectid != objectid ||
7284 found_key.type != BTRFS_EXTENT_DATA_KEY)
7285 goto not_found;
7286 if (start + len <= found_key.offset)
7287 goto not_found;
7288 if (start > found_key.offset)
7289 goto next;
7290
7291 /* New extent overlaps with existing one */
7292 em->start = start;
7293 em->len = found_key.offset - start;
7294 em->disk_bytenr = EXTENT_MAP_HOLE;
7295 goto insert;
7296 }
7297
7298 btrfs_extent_item_to_extent_map(inode, path, item, em);
7299
7300 if (extent_type == BTRFS_FILE_EXTENT_REG ||
7301 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
7302 goto insert;
7303 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
7304 /*
7305 * Inline extent can only exist at file offset 0. This is
7306 * ensured by tree-checker and inline extent creation path.
7307 * Thus all members representing file offsets should be zero.
7308 */
7309 ASSERT(extent_start == 0);
7310 ASSERT(em->start == 0);
7311
7312 /*
7313 * btrfs_extent_item_to_extent_map() should have properly
7314 * initialized em members already.
7315 *
7316 * Other members are not utilized for inline extents.
7317 */
7318 ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
7319 ASSERT(em->len == fs_info->sectorsize);
7320
7321 ret = read_inline_extent(path, folio);
7322 if (ret < 0)
7323 goto out;
7324 goto insert;
7325 }
7326 not_found:
7327 em->start = start;
7328 em->len = len;
7329 em->disk_bytenr = EXTENT_MAP_HOLE;
7330 insert:
7331 ret = 0;
7332 btrfs_release_path(path);
7333 if (unlikely(em->start > start || btrfs_extent_map_end(em) <= start)) {
7334 btrfs_err(fs_info,
7335 "bad extent! em: [%llu %llu] passed [%llu %llu]",
7336 em->start, em->len, start, len);
7337 ret = -EIO;
7338 goto out;
7339 }
7340
7341 write_lock(&em_tree->lock);
7342 ret = btrfs_add_extent_mapping(inode, &em, start, len);
7343 write_unlock(&em_tree->lock);
7344 out:
7345 btrfs_free_path(path);
7346
7347 trace_btrfs_get_extent(root, inode, em);
7348
7349 if (ret) {
7350 btrfs_free_extent_map(em);
7351 return ERR_PTR(ret);
7352 }
7353 return em;
7354 }
7355
7356 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7357 {
7358 struct btrfs_block_group *block_group;
7359 bool readonly = false;
7360
7361 block_group = btrfs_lookup_block_group(fs_info, bytenr);
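/* Treat a missing block group as read-only, to err on the safe side. */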
7362 if (!block_group || block_group->ro)
7363 readonly = true;
7364 if (block_group)
7365 btrfs_put_block_group(block_group);
7366 return readonly;
7367 }
7368
7369 /*
7370 * Check if we can do nocow write into the range [@offset, @offset + @len)
7371 *
7372 * @offset: File offset
7373 * @len: The length to write, will be updated to the nocow writeable
7374 * range
7375 * @file_extent: (optional) If not NULL, return the details of the file
7376 * extent usable for the NOCOW write
7377 * @nowait: True if we are in a non-blocking context and must not block
7378 *
7379 * Return:
7380 * >0 and update @len if we can do nocow write
7381 * 0 if we can't do nocow write
7382 * <0 if error happened
7383 *
7384 * NOTE: This only checks the file extents; the caller is responsible for
7385 * waiting for any ordered extents.
7386 */
7387 noinline int can_nocow_extent(struct btrfs_inode *inode, u64 offset, u64 *len,
7388 struct btrfs_file_extent *file_extent,
7389 bool nowait)
7390 {
7391 struct btrfs_root *root = inode->root;
7392 struct btrfs_fs_info *fs_info = root->fs_info;
7393 struct can_nocow_file_extent_args nocow_args = { 0 };
7394 BTRFS_PATH_AUTO_FREE(path);
7395 int ret;
7396 struct extent_buffer *leaf;
7397 struct extent_io_tree *io_tree = &inode->io_tree;
7398 struct btrfs_file_extent_item *fi;
7399 struct btrfs_key key;
7400 int found_type;
7401
7402 path = btrfs_alloc_path();
7403 if (!path)
7404 return -ENOMEM;
7405 path->nowait = nowait;
7406
7407 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7408 offset, 0);
7409 if (ret < 0)
7410 return ret;
7411
7412 if (ret == 1) {
7413 if (path->slots[0] == 0) {
7414 /* Can't find the item, must COW. */
7415 return 0;
7416 }
7417 path->slots[0]--;
7418 }
7419 ret = 0;
7420 leaf = path->nodes[0];
7421 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7422 if (key.objectid != btrfs_ino(inode) ||
7423 key.type != BTRFS_EXTENT_DATA_KEY) {
7424 /* Not our file or wrong item type, must COW. */
7425 return 0;
7426 }
7427
7428 if (key.offset > offset) {
7429 /* Wrong offset, must COW. */
7430 return 0;
7431 }
7432
7433 if (btrfs_file_extent_end(path) <= offset)
7434 return 0;
7435
7436 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7437 found_type = btrfs_file_extent_type(leaf, fi);
7438
7439 nocow_args.start = offset;
7440 nocow_args.end = offset + *len - 1;
7441 nocow_args.free_path = true;
7442
7443 ret = can_nocow_file_extent(path, &key, inode, &nocow_args);
7444 /* can_nocow_file_extent() has freed the path. */
7445 path = NULL;
7446
7447 if (ret != 1) {
7448 /* Treat errors as not being able to NOCOW. */
7449 return 0;
7450 }
7451
7452 if (btrfs_extent_readonly(fs_info,
7453 nocow_args.file_extent.disk_bytenr +
7454 nocow_args.file_extent.offset))
7455 return 0;
7456
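/*
 * The inode is not NODATACOW and the extent is only preallocated:
 * pending delalloc in the range would be turned into a COW write, so
 * tell the caller to retry with -EAGAIN in that case.
 */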
7457 if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
7458 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7459 u64 range_end;
7460
7461 range_end = round_up(offset + nocow_args.file_extent.num_bytes,
7462 root->fs_info->sectorsize) - 1;
7463 ret = btrfs_test_range_bit_exists(io_tree, offset, range_end,
7464 EXTENT_DELALLOC);
7465 if (ret)
7466 return -EAGAIN;
7467 }
7468
7469 if (file_extent)
7470 memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
7471
7472 *len = nocow_args.file_extent.num_bytes;
7473
7474 return 1;
7475 }
7476
7477 /* The callers of this must take lock_extent() */
7478 struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
7479 const struct btrfs_file_extent *file_extent,
7480 int type)
7481 {
7482 struct extent_map *em;
7483 int ret;
7484
7485 /*
7486 * Note the missing NOCOW type.
7487 *
7488 * For pure NOCOW writes, we should not create an io extent map, but
7489 * just reuse the existing one.
7490 * Only PREALLOC writes (NOCOW write into preallocated range) can
7491 * create an io extent map.
7492 */
7493 ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7494 type == BTRFS_ORDERED_COMPRESSED ||
7495 type == BTRFS_ORDERED_REGULAR);
7496
7497 switch (type) {
7498 case BTRFS_ORDERED_PREALLOC:
7499 /* We're only referring to part of a larger preallocated extent. */
7500 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7501 break;
7502 case BTRFS_ORDERED_REGULAR:
7503 /* COW results in a new extent matching our file extent size. */
7504 ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
7505 ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
7506
7507 /* Since it's a new extent, we should not have any offset. */
7508 ASSERT(file_extent->offset == 0);
7509 break;
7510 case BTRFS_ORDERED_COMPRESSED:
7511 /* Must be compressed. */
7512 ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
7513
7514 /*
7515 * An encoded write can make us refer to part of the
7516 * uncompressed extent.
7517 */
7518 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7519 break;
7520 }
7521
7522 em = btrfs_alloc_extent_map();
7523 if (!em)
7524 return ERR_PTR(-ENOMEM);
7525
7526 em->start = start;
7527 em->len = file_extent->num_bytes;
7528 em->disk_bytenr = file_extent->disk_bytenr;
7529 em->disk_num_bytes = file_extent->disk_num_bytes;
7530 em->ram_bytes = file_extent->ram_bytes;
7531 em->generation = -1;
7532 em->offset = file_extent->offset;
7533 em->flags |= EXTENT_FLAG_PINNED;
7534 if (type == BTRFS_ORDERED_COMPRESSED)
7535 btrfs_extent_map_set_compression(em, file_extent->compression);
7536
7537 ret = btrfs_replace_extent_map_range(inode, em, true);
7538 if (ret) {
7539 btrfs_free_extent_map(em);
7540 return ERR_PTR(ret);
7541 }
7542
7543 /* The em got 2 refs now, the caller needs to do btrfs_free_extent_map() once. */
7544 return em;
7545 }
7546
7547 /*
7548 * For release_folio() and invalidate_folio() we have a race window where
7549 * folio_end_writeback() is called but the subpage spinlock is not yet released.
7550 * If we continue to release/invalidate the folio, we could cause a
7551 * use-after-free on the subpage spinlock. So this function spins and
7552 * waits for the subpage spinlock to be released.
7553 */
7554 static void wait_subpage_spinlock(struct folio *folio)
7555 {
7556 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
7557 struct btrfs_folio_state *bfs;
7558
7559 if (!btrfs_is_subpage(fs_info, folio))
7560 return;
7561
7562 ASSERT(folio_test_private(folio) && folio_get_private(folio));
7563 bfs = folio_get_private(folio);
7564
7565 /*
7566 * This may look insane as we just acquire the spinlock and release it,
7567 * without doing anything. But we just want to make sure no one is
7568 * still holding the subpage spinlock.
7569 * And since the folio is neither dirty nor under writeback, and we have
7570 * it locked, the only possible way for someone to hold the spinlock is
7571 * the endio function clearing the writeback flag.
7572 *
7573 * Here we just acquire the spinlock so that all existing callers
7574 * should exit and we're safe to release/invalidate the page.
7575 */
7576 spin_lock_irq(&bfs->lock);
7577 spin_unlock_irq(&bfs->lock);
7578 }
7579
7580 static int btrfs_launder_folio(struct folio *folio)
7581 {
7582 return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
7583 folio_size(folio), NULL);
7584 }
7585
7586 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7587 {
7588 if (try_release_extent_mapping(folio, gfp_flags)) {
7589 wait_subpage_spinlock(folio);
7590 clear_folio_extent_mapped(folio);
7591 return true;
7592 }
7593 return false;
7594 }
7595
7596 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7597 {
7598 if (folio_test_writeback(folio) || folio_test_dirty(folio))
7599 return false;
7600 return __btrfs_release_folio(folio, gfp_flags);
7601 }
7602
7603 #ifdef CONFIG_MIGRATION
7604 static int btrfs_migrate_folio(struct address_space *mapping,
7605 struct folio *dst, struct folio *src,
7606 enum migrate_mode mode)
7607 {
7608 int ret = filemap_migrate_folio(mapping, dst, src, mode);
7609
7610 if (ret)
7611 return ret;
7612
7613 if (folio_test_ordered(src)) {
7614 folio_clear_ordered(src);
7615 folio_set_ordered(dst);
7616 }
7617
7618 return 0;
7619 }
7620 #else
7621 #define btrfs_migrate_folio NULL
7622 #endif
7623
7624 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
7625 size_t length)
7626 {
7627 struct btrfs_inode *inode = folio_to_inode(folio);
7628 struct btrfs_fs_info *fs_info = inode->root->fs_info;
7629 struct extent_io_tree *tree = &inode->io_tree;
7630 struct extent_state *cached_state = NULL;
7631 u64 page_start = folio_pos(folio);
7632 u64 page_end = page_start + folio_size(folio) - 1;
7633 u64 cur;
7634 int inode_evicting = inode_state_read_once(&inode->vfs_inode) & I_FREEING;
7635
7636 /*
7637 * We have the folio locked, so no new ordered extent can be created on
7638 * it, nor can any bio be submitted for this folio.
7639 *
7640 * But an already submitted bio can still be finished on this folio.
7641 * Furthermore, the endio function won't skip a folio that has the
7642 * Ordered bit already cleared, so it's possible for endio and
7643 * invalidate_folio to do the same ordered extent accounting twice
7644 * on one folio.
7645 *
7646 * So here we wait for any submitted bios to finish, so that we won't
7647 * do double ordered extent accounting on the same folio.
7648 */
7649 folio_wait_writeback(folio);
7650 wait_subpage_spinlock(folio);
7651
7652 /*
7653 * For the subpage case, we have call sites like
7654 * btrfs_punch_hole_lock_range() which passes range not aligned to
7655 * sectorsize.
7656 * If the range doesn't cover the full folio, we don't need to and
7657 * shouldn't clear page extent mapped, as folio->private can still
7658 * record subpage dirty bits for other part of the range.
7659 *
7660 * For cases that invalidate the full folio even the range doesn't
7661 * cover the full folio, like invalidating the last folio, we're
7662 * still safe to wait for ordered extent to finish.
7663 */
7664 if (!(offset == 0 && length == folio_size(folio))) {
7665 btrfs_release_folio(folio, GFP_NOFS);
7666 return;
7667 }
7668
7669 if (!inode_evicting)
7670 btrfs_lock_extent(tree, page_start, page_end, &cached_state);
7671
7672 cur = page_start;
7673 while (cur < page_end) {
7674 struct btrfs_ordered_extent *ordered;
7675 u64 range_end;
7676 u32 range_len;
7677 u32 extra_flags = 0;
7678
7679 ordered = btrfs_lookup_first_ordered_range(inode, cur,
7680 page_end + 1 - cur);
7681 if (!ordered) {
7682 range_end = page_end;
7683 /*
7684 * No ordered extent covering this range, we are safe
7685 * to delete all extent states in the range.
7686 */
7687 extra_flags = EXTENT_CLEAR_ALL_BITS;
7688 goto next;
7689 }
7690 if (ordered->file_offset > cur) {
7691 /*
7692 * There is a range between [cur, oe->file_offset) not
7693 * covered by any ordered extent.
7694 * We are safe to delete all extent states, and handle
7695 * the ordered extent in the next iteration.
7696 */
7697 range_end = ordered->file_offset - 1;
7698 extra_flags = EXTENT_CLEAR_ALL_BITS;
7699 goto next;
7700 }
7701
7702 range_end = min(ordered->file_offset + ordered->num_bytes - 1,
7703 page_end);
7704 ASSERT(range_end + 1 - cur < U32_MAX);
7705 range_len = range_end + 1 - cur;
7706 if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
7707 /*
7708 * If Ordered is cleared, it means endio has
7709 * already been executed for the range.
7710 * We can't delete the extent states as
7711 * btrfs_finish_ordered_io() may still use some of them.
7712 */
7713 goto next;
7714 }
7715 btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
7716
7717 /*
7718 * IO on this page will never be started, so we need to account
7719 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
7720 * here, must leave that up for the ordered extent completion.
7721 *
7722 * This will also unlock the range for incoming
7723 * btrfs_finish_ordered_io().
7724 */
7725 if (!inode_evicting)
7726 btrfs_clear_extent_bit(tree, cur, range_end,
7727 EXTENT_DELALLOC |
7728 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7729 EXTENT_DEFRAG, &cached_state);
7730
7731 spin_lock(&inode->ordered_tree_lock);
7732 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
7733 ordered->truncated_len = min(ordered->truncated_len,
7734 cur - ordered->file_offset);
7735 spin_unlock(&inode->ordered_tree_lock);
7736
7737 /*
7738 * If the ordered extent has finished, we're safe to delete all
7739 * the extent states of the range, otherwise
7740 * btrfs_finish_ordered_io() will get executed by endio for
7741 * other pages, so we can't delete extent states.
7742 */
7743 if (btrfs_dec_test_ordered_pending(inode, &ordered,
7744 cur, range_end + 1 - cur)) {
7745 btrfs_finish_ordered_io(ordered);
7746 /*
7747 * The ordered extent has finished, now we're again
7748 * safe to delete all extent states of the range.
7749 */
7750 extra_flags = EXTENT_CLEAR_ALL_BITS;
7751 }
7752 next:
7753 if (ordered)
7754 btrfs_put_ordered_extent(ordered);
7755 /*
7756 * Qgroup reserved space handler
7757 * Sector(s) here will be either:
7758 *
7759 * 1) Already written to disk or bio already finished
7760 * Then its QGROUP_RESERVED bit in io_tree is already cleared.
7761 * Qgroup will be handled by its qgroup_record then.
7762 * btrfs_qgroup_free_data() call will do nothing here.
7763 *
7764 * 2) Not written to disk yet
7765 * Then the btrfs_qgroup_free_data() call will clear the
7766 * QGROUP_RESERVED bit of its io_tree, and free the qgroup
7767 * reserved data space, since the IO will never happen for
7768 * this folio.
7769 */
7770 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
7771 if (!inode_evicting)
7772 btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
7773 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
7774 EXTENT_DEFRAG | extra_flags,
7775 &cached_state);
7776 cur = range_end + 1;
7777 }
7778 /*
7779 * We have iterated through all ordered extents of the folio; the folio
7780 * should not have the Ordered bit anymore, or the above iteration
7781 * did something wrong.
7782 */
7783 ASSERT(!folio_test_ordered(folio));
7784 btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
7785 if (!inode_evicting)
7786 __btrfs_release_folio(folio, GFP_NOFS);
7787 clear_folio_extent_mapped(folio);
7788 }
7789
7790 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
7791 {
7792 struct btrfs_truncate_control control = {
7793 .inode = inode,
7794 .ino = btrfs_ino(inode),
7795 .min_type = BTRFS_EXTENT_DATA_KEY,
7796 .clear_extent_range = true,
7797 .new_size = inode->vfs_inode.i_size,
7798 };
7799 struct btrfs_root *root = inode->root;
7800 struct btrfs_fs_info *fs_info = root->fs_info;
7801 struct btrfs_block_rsv rsv;
7802 int ret;
7803 struct btrfs_trans_handle *trans;
7804 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
7805 const u64 lock_start = round_down(inode->vfs_inode.i_size, fs_info->sectorsize);
7806 const u64 i_size_up = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
7807
7808 /* Our inode is locked and the i_size can't be changed concurrently. */
7809 btrfs_assert_inode_locked(inode);
7810
7811 if (!skip_writeback) {
7812 ret = btrfs_wait_ordered_range(inode, lock_start, (u64)-1);
7813 if (ret)
7814 return ret;
7815 }
7816
7817 /*
7818 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
7819 * things going on here:
7820 *
7821 * 1) We need to reserve space to update our inode.
7822 *
7823 * 2) We need to have something to cache all the space that is going to
7824 * be free'd up by the truncate operation, but also have some slack
7825 * space reserved in case it uses space during the truncate (thank you
7826 * very much snapshotting).
7827 *
7828 * And we need these to be separate. The fact is we can use a lot of
7829 * space doing the truncate, and we have no earthly idea how much space
7830 * we will use, so we need the truncate reservation to be separate so it
7831 * doesn't end up using space reserved for updating the inode. We also
7832 * need to be able to stop the transaction and start a new one, which
7833 * means we need to be able to update the inode several times, and we
7834 * have no idea of knowing how many times that will be, so we can't just
7835 * reserve 1 item for the entirety of the operation, so that has to be
7836 * done separately as well.
7837 *
7838 * So that leaves us with
7839 *
7840 * 1) rsv - for the truncate reservation, which we will steal from the
7841 * transaction reservation.
7842 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
7843 * updating the inode.
7844 */
7845 btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
7846 rsv.size = min_size;
7847 rsv.failfast = true;
7848
7849 /*
7850 * 1 for the truncate slack space
7851 * 1 for updating the inode.
7852 */
7853 trans = btrfs_start_transaction(root, 2);
7854 if (IS_ERR(trans)) {
7855 ret = PTR_ERR(trans);
7856 goto out;
7857 }
7858
7859 /* Migrate the slack space for the truncate to our reserve */
7860 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, &rsv,
7861 min_size, false);
7862 /*
7863 * We have reserved 2 metadata units when we started the transaction and
7864 * min_size matches 1 unit, so this should never fail, but if it does,
7865 * it's not critical, we just fail the truncation.
7866 */
7867 if (WARN_ON(ret)) {
7868 btrfs_end_transaction(trans);
7869 goto out;
7870 }
7871
7872 trans->block_rsv = &rsv;
7873
7874 while (1) {
7875 struct extent_state *cached_state = NULL;
7876
7877 btrfs_lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7878 /*
7879 * We want to drop from the next block forward in case this new
7880 * size is not block aligned since we will be keeping the last
7881 * block of the extent just the way it is.
7882 */
7883 btrfs_drop_extent_map_range(inode, i_size_up, (u64)-1, false);
7884
7885 ret = btrfs_truncate_inode_items(trans, root, &control);
7886
7887 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
7888 btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
7889
7890 btrfs_unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7891
7892 trans->block_rsv = &fs_info->trans_block_rsv;
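/*
 * Only -ENOSPC and -EAGAIN mean we ran out of reserved space and must
 * retry in a new transaction; anything else, including success, ends
 * the loop.
 */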
7893 if (ret != -ENOSPC && ret != -EAGAIN)
7894 break;
7895
7896 ret = btrfs_update_inode(trans, inode);
7897 if (ret)
7898 break;
7899
7900 btrfs_end_transaction(trans);
7901 btrfs_btree_balance_dirty(fs_info);
7902
7903 trans = btrfs_start_transaction(root, 2);
7904 if (IS_ERR(trans)) {
7905 ret = PTR_ERR(trans);
7906 trans = NULL;
7907 break;
7908 }
7909
7910 btrfs_block_rsv_release(fs_info, &rsv, -1, NULL);
7911 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
7912 &rsv, min_size, false);
7913 /*
7914 * We have reserved 2 metadata units when we started the
7915 * transaction and min_size matches 1 unit, so this should never
7916 * fail, but if it does, it's not critical, we just fail the truncation.
7917 */
7918 if (WARN_ON(ret))
7919 break;
7920
7921 trans->block_rsv = &rsv;
7922 }
7923
7924 /*
7925 * We can't call btrfs_truncate_block inside a trans handle as we could
7926 * deadlock with freeze. If we got BTRFS_NEED_TRUNCATE_BLOCK then we
7927 * know we've truncated everything except the last little bit, and can
7928 * do btrfs_truncate_block and then update the disk_i_size.
7929 */
7930 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
7931 btrfs_end_transaction(trans);
7932 btrfs_btree_balance_dirty(fs_info);
7933
7934 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size,
7935 inode->vfs_inode.i_size, (u64)-1);
7936 if (ret)
7937 goto out;
7938 trans = btrfs_start_transaction(root, 1);
7939 if (IS_ERR(trans)) {
7940 ret = PTR_ERR(trans);
7941 goto out;
7942 }
7943 btrfs_inode_safe_disk_i_size_write(inode, 0);
7944 }
7945
7946 if (trans) {
7947 int ret2;
7948
7949 trans->block_rsv = &fs_info->trans_block_rsv;
7950 ret2 = btrfs_update_inode(trans, inode);
7951 if (ret2 && !ret)
7952 ret = ret2;
7953
7954 ret2 = btrfs_end_transaction(trans);
7955 if (ret2 && !ret)
7956 ret = ret2;
7957 btrfs_btree_balance_dirty(fs_info);
7958 }
7959 out:
7960 btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
7961 /*
7962 * So if we truncate and then write and fsync we normally would just
7963 * write the extents that changed, which is a problem if we need to
7964 * first truncate that entire inode. So set this flag so we write out
7965 * all of the extents in the inode to the sync log so we're completely
7966 * safe.
7967 *
7968 * If no extents were dropped or trimmed we don't need to force the next
7969 * fsync to truncate all the inode's items from the log and re-log them
7970 * all. This means the truncate operation did not change the file size,
7971 * or changed it to a smaller size but there was only an implicit hole
7972 * between the old i_size and the new i_size, and there were no prealloc
7973 * extents beyond i_size to drop.
7974 */
7975 if (control.extents_found > 0)
7976 btrfs_set_inode_full_sync(inode);
7977
7978 return ret;
7979 }
7980
7981 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
7982 struct inode *dir)
7983 {
7984 struct inode *inode;
7985
7986 inode = new_inode(dir->i_sb);
7987 if (inode) {
7988 /*
7989 * Subvolumes don't inherit the sgid bit or the parent's gid if
7990 * the parent's sgid bit is set. This is probably a bug.
7991 */
7992 inode_init_owner(idmap, inode, NULL,
7993 S_IFDIR | (~current_umask() & S_IRWXUGO));
7994 inode->i_op = &btrfs_dir_inode_operations;
7995 inode->i_fop = &btrfs_dir_file_operations;
7996 }
7997 return inode;
7998 }
7999
8000 struct inode *btrfs_alloc_inode(struct super_block *sb)
8001 {
8002 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
8003 struct btrfs_inode *ei;
8004 struct inode *inode;
8005
8006 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
8007 if (!ei)
8008 return NULL;
8009
8010 ei->root = NULL;
8011 ei->generation = 0;
8012 ei->last_trans = 0;
8013 ei->last_sub_trans = 0;
8014 ei->logged_trans = 0;
8015 ei->delalloc_bytes = 0;
8016 /* new_delalloc_bytes and last_dir_index_offset are in a union. */
8017 ei->new_delalloc_bytes = 0;
8018 ei->defrag_bytes = 0;
8019 ei->disk_i_size = 0;
8020 ei->flags = 0;
8021 ei->ro_flags = 0;
8022 /*
8023 * ->index_cnt will be properly initialized later when creating a new
8024 * inode (btrfs_create_new_inode()) or when reading an existing inode
8025 * from disk (btrfs_read_locked_inode()).
8026 */
8027 ei->csum_bytes = 0;
8028 ei->dir_index = 0;
8029 ei->last_unlink_trans = 0;
8030 ei->last_reflink_trans = 0;
8031 ei->last_log_commit = 0;
8032
8033 spin_lock_init(&ei->lock);
8034 ei->outstanding_extents = 0;
8035 if (sb->s_magic != BTRFS_TEST_MAGIC)
8036 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
8037 BTRFS_BLOCK_RSV_DELALLOC);
8038 ei->runtime_flags = 0;
8039 ei->prop_compress = BTRFS_COMPRESS_NONE;
8040 ei->defrag_compress = BTRFS_COMPRESS_NONE;
8041
8042 ei->delayed_node = NULL;
8043
8044 ei->i_otime_sec = 0;
8045 ei->i_otime_nsec = 0;
8046
8047 inode = &ei->vfs_inode;
8048 btrfs_extent_map_tree_init(&ei->extent_tree);
8049
8050 /* This io tree sets the valid inode. */
8051 btrfs_extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
8052 ei->io_tree.inode = ei;
8053
8054 ei->file_extent_tree = NULL;
8055
8056 mutex_init(&ei->log_mutex);
8057 spin_lock_init(&ei->ordered_tree_lock);
8058 ei->ordered_tree = RB_ROOT;
8059 ei->ordered_tree_last = NULL;
8060 INIT_LIST_HEAD(&ei->delalloc_inodes);
8061 INIT_LIST_HEAD(&ei->delayed_iput);
8062 init_rwsem(&ei->i_mmap_lock);
8063
8064 return inode;
8065 }
8066
8067 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8068 void btrfs_test_destroy_inode(struct inode *inode)
8069 {
8070 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
8071 kfree(BTRFS_I(inode)->file_extent_tree);
8072 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8073 }
8074 #endif
8075
8076 void btrfs_free_inode(struct inode *inode)
8077 {
8078 kfree(BTRFS_I(inode)->file_extent_tree);
8079 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8080 }
8081
8082 void btrfs_destroy_inode(struct inode *vfs_inode)
8083 {
8084 struct btrfs_ordered_extent *ordered;
8085 struct btrfs_inode *inode = BTRFS_I(vfs_inode);
8086 struct btrfs_root *root = inode->root;
8087 bool freespace_inode;
8088
8089 WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
8090 WARN_ON(vfs_inode->i_data.nrpages);
8091 WARN_ON(inode->block_rsv.reserved);
8092 WARN_ON(inode->block_rsv.size);
8093 WARN_ON(inode->outstanding_extents);
8094 if (!S_ISDIR(vfs_inode->i_mode)) {
8095 WARN_ON(inode->delalloc_bytes);
8096 WARN_ON(inode->new_delalloc_bytes);
8097 WARN_ON(inode->csum_bytes);
8098 }
8099 if (!root || !btrfs_is_data_reloc_root(root))
8100 WARN_ON(inode->defrag_bytes);
8101
8102 /*
8103 * This can happen when we create an inode, but somebody else also
8104 * created the same inode and we need to destroy the one we already
8105 * created.
8106 */
8107 if (!root)
8108 return;
8109
8110 /*
8111 * If this is a free space inode do not take the ordered extents lockdep
8112 * map.
8113 */
8114 freespace_inode = btrfs_is_free_space_inode(inode);
8115
8116 while (1) {
8117 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8118 if (!ordered)
8119 break;
8120
8121 btrfs_err(root->fs_info,
8122 "found ordered extent %llu %llu on inode cleanup",
8123 ordered->file_offset, ordered->num_bytes);
8124
8125 if (!freespace_inode)
8126 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
8127
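/*
 * Drop two references: one from the lookup above and one owned by the
 * ordered tree that btrfs_remove_ordered_extent() just removed the
 * extent from.
 */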
8128 btrfs_remove_ordered_extent(inode, ordered);
8129 btrfs_put_ordered_extent(ordered);
8130 btrfs_put_ordered_extent(ordered);
8132 }
8133 btrfs_qgroup_check_reserved_leak(inode);
8134 btrfs_del_inode_from_root(inode);
8135 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
8136 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
8137 btrfs_put_root(inode->root);
8138 }
8139
8140 int btrfs_drop_inode(struct inode *inode)
8141 {
8142 struct btrfs_root *root = BTRFS_I(inode)->root;
8143
8144 if (root == NULL)
8145 return 1;
8146
8147 /* The snapshot/subvolume tree is being deleted. */
8148 if (btrfs_root_refs(&root->root_item) == 0)
8149 return 1;
8150 else
8151 return inode_generic_drop(inode);
8152 }
8153
8154 static void init_once(void *foo)
8155 {
8156 struct btrfs_inode *ei = foo;
8157
8158 inode_init_once(&ei->vfs_inode);
8159 }
8160
8161 void __cold btrfs_destroy_cachep(void)
8162 {
8163 /*
8164 * Make sure all delayed rcu free inodes are flushed before we
8165 * destroy the cache.
8166 */
8167 rcu_barrier();
8168 kmem_cache_destroy(btrfs_inode_cachep);
8169 }
8170
8171 int __init btrfs_init_cachep(void)
8172 {
8173 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
8174 sizeof(struct btrfs_inode), 0,
8175 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
8176 init_once);
8177 if (!btrfs_inode_cachep)
8178 return -ENOMEM;
8179
8180 return 0;
8181 }
8182
8183 static int btrfs_getattr(struct mnt_idmap *idmap,
8184 const struct path *path, struct kstat *stat,
8185 u32 request_mask, unsigned int flags)
8186 {
8187 u64 delalloc_bytes;
8188 u64 inode_bytes;
8189 struct inode *inode = d_inode(path->dentry);
8190 u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
8191 u32 bi_flags = BTRFS_I(inode)->flags;
8192 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
8193
8194 stat->result_mask |= STATX_BTIME;
8195 stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
8196 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
8197 if (bi_flags & BTRFS_INODE_APPEND)
8198 stat->attributes |= STATX_ATTR_APPEND;
8199 if (bi_flags & BTRFS_INODE_COMPRESS)
8200 stat->attributes |= STATX_ATTR_COMPRESSED;
8201 if (bi_flags & BTRFS_INODE_IMMUTABLE)
8202 stat->attributes |= STATX_ATTR_IMMUTABLE;
8203 if (bi_flags & BTRFS_INODE_NODUMP)
8204 stat->attributes |= STATX_ATTR_NODUMP;
8205 if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
8206 stat->attributes |= STATX_ATTR_VERITY;
8207
8208 stat->attributes_mask |= (STATX_ATTR_APPEND |
8209 STATX_ATTR_COMPRESSED |
8210 STATX_ATTR_IMMUTABLE |
8211 STATX_ATTR_NODUMP);
8212
8213 generic_fillattr(idmap, request_mask, inode, stat);
8214 stat->dev = BTRFS_I(inode)->root->anon_dev;
8215
8216 stat->subvol = btrfs_root_id(BTRFS_I(inode)->root);
8217 stat->result_mask |= STATX_SUBVOL;
8218
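/*
 * Include not yet allocated delalloc in the block count, so that
 * st_blocks reflects the space the file is going to consume.
 */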
8219 spin_lock(&BTRFS_I(inode)->lock);
8220 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
8221 inode_bytes = inode_get_bytes(inode);
8222 spin_unlock(&BTRFS_I(inode)->lock);
8223 stat->blocks = (ALIGN(inode_bytes, blocksize) +
8224 ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
8225 return 0;
8226 }
8227
8228 static int btrfs_rename_exchange(struct inode *old_dir,
8229 struct dentry *old_dentry,
8230 struct inode *new_dir,
8231 struct dentry *new_dentry)
8232 {
8233 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8234 struct btrfs_trans_handle *trans;
8235 unsigned int trans_num_items;
8236 struct btrfs_root *root = BTRFS_I(old_dir)->root;
8237 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8238 struct inode *new_inode = new_dentry->d_inode;
8239 struct inode *old_inode = old_dentry->d_inode;
8240 struct btrfs_rename_ctx old_rename_ctx;
8241 struct btrfs_rename_ctx new_rename_ctx;
8242 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8243 u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
8244 u64 old_idx = 0;
8245 u64 new_idx = 0;
8246 int ret;
8247 int ret2;
8248 bool need_abort = false;
8249 bool logs_pinned = false;
8250 struct fscrypt_name old_fname, new_fname;
8251 struct fscrypt_str *old_name, *new_name;
8252
8253 /*
8254 * For non-subvolumes allow exchange only within one subvolume, in the
8255 * same inode namespace. Two subvolumes (each represented as a directory)
8256 * can be exchanged as they're a logical link and have a fixed inode number.
8257 */
8258 if (root != dest &&
8259 (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
8260 new_ino != BTRFS_FIRST_FREE_OBJECTID))
8261 return -EXDEV;
8262
8263 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8264 if (ret)
8265 return ret;
8266
8267 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8268 if (ret) {
8269 fscrypt_free_filename(&old_fname);
8270 return ret;
8271 }
8272
8273 old_name = &old_fname.disk_name;
8274 new_name = &new_fname.disk_name;
8275
8276 /* Close the race window with the snapshot create/destroy ioctls. */
8277 if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
8278 new_ino == BTRFS_FIRST_FREE_OBJECTID)
8279 down_read(&fs_info->subvol_sem);
8280
8281 /*
8282 * For each inode:
8283 * 1 to remove old dir item
8284 * 1 to remove old dir index
8285 * 1 to add new dir item
8286 * 1 to add new dir index
8287 * 1 to update parent inode
8288 *
8289 * If the parents are the same, we only need to account for one
8290 */
8291 trans_num_items = (old_dir == new_dir ? 9 : 10);
8292 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8293 /*
8294 * 1 to remove old root ref
8295 * 1 to remove old root backref
8296 * 1 to add new root ref
8297 * 1 to add new root backref
8298 */
8299 trans_num_items += 4;
8300 } else {
8301 /*
8302 * 1 to update inode item
8303 * 1 to remove old inode ref
8304 * 1 to add new inode ref
8305 */
8306 trans_num_items += 3;
8307 }
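/* Same per-inode accounting as above, now for the second inode. */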
8308 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
8309 trans_num_items += 4;
8310 else
8311 trans_num_items += 3;
8312 trans = btrfs_start_transaction(root, trans_num_items);
8313 if (IS_ERR(trans)) {
8314 ret = PTR_ERR(trans);
8315 goto out_notrans;
8316 }
8317
8318 if (dest != root) {
8319 ret = btrfs_record_root_in_trans(trans, dest);
8320 if (ret)
8321 goto out_fail;
8322 }
8323
8324 /*
8325 * We need to find a free sequence number both in the source and
8326 * in the destination directory for the exchange.
8327 */
8328 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8329 if (ret)
8330 goto out_fail;
8331 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8332 if (ret)
8333 goto out_fail;
8334
8335 BTRFS_I(old_inode)->dir_index = 0ULL;
8336 BTRFS_I(new_inode)->dir_index = 0ULL;
8337
8338 /* Reference for the source. */
8339 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8340 /* force full log commit if subvolume involved. */
8341 btrfs_set_log_full_commit(trans);
8342 } else {
8343 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8344 btrfs_ino(BTRFS_I(new_dir)),
8345 old_idx);
8346 if (ret)
8347 goto out_fail;
8348 need_abort = true;
8349 }
8350
8351 /* And now for the dest. */
8352 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8353 /* force full log commit if subvolume involved. */
8354 btrfs_set_log_full_commit(trans);
8355 } else {
8356 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8357 btrfs_ino(BTRFS_I(old_dir)),
8358 new_idx);
8359 if (ret) {
8360 if (unlikely(need_abort))
8361 btrfs_abort_transaction(trans, ret);
8362 goto out_fail;
8363 }
8364 }
8365
8366 /* Update inode version and ctime/mtime. */
8367 inode_inc_iversion(old_dir);
8368 inode_inc_iversion(new_dir);
8369 inode_inc_iversion(old_inode);
8370 inode_inc_iversion(new_inode);
8371 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8372
8373 if (old_ino != BTRFS_FIRST_FREE_OBJECTID &&
8374 new_ino != BTRFS_FIRST_FREE_OBJECTID) {
8375 /*
8376 * If we are renaming in the same directory (and it's not for
8377 * root entries) pin the log early to prevent any concurrent
8378 * task from logging the directory after we removed the old
8379 * entries and before we add the new entries, otherwise that
8380 * task can sync a log without any entry for the inodes we are
8381 * renaming and therefore replaying that log, if a power failure
8382 * happens after syncing the log, would result in deleting the
8383 * inodes.
8384 *
8385 * If the rename affects two different directories, we want to
8386 * make sure the that there's no log commit that contains
8387 * updates for only one of the directories but not for the
8388 * other.
8389 *
8390 * If we are renaming an entry for a root, we don't care about
8391 * log updates since we called btrfs_set_log_full_commit().
8392 */
8393 btrfs_pin_log_trans(root);
8394 btrfs_pin_log_trans(dest);
8395 logs_pinned = true;
8396 }
8397
8398 if (old_dentry->d_parent != new_dentry->d_parent) {
8399 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8400 BTRFS_I(old_inode), true);
8401 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8402 BTRFS_I(new_inode), true);
8403 }
8404
8405 /* src is a subvolume */
8406 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8407 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8408 if (unlikely(ret)) {
8409 btrfs_abort_transaction(trans, ret);
8410 goto out_fail;
8411 }
8412 } else { /* src is an inode */
8413 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8414 BTRFS_I(old_dentry->d_inode),
8415 old_name, &old_rename_ctx);
8416 if (unlikely(ret)) {
8417 btrfs_abort_transaction(trans, ret);
8418 goto out_fail;
8419 }
8420 ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8421 if (unlikely(ret)) {
8422 btrfs_abort_transaction(trans, ret);
8423 goto out_fail;
8424 }
8425 }
8426
8427 /* dest is a subvolume */
8428 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8429 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8430 if (unlikely(ret)) {
8431 btrfs_abort_transaction(trans, ret);
8432 goto out_fail;
8433 }
8434 } else { /* dest is an inode */
8435 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8436 BTRFS_I(new_dentry->d_inode),
8437 new_name, &new_rename_ctx);
8438 if (unlikely(ret)) {
8439 btrfs_abort_transaction(trans, ret);
8440 goto out_fail;
8441 }
8442 ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8443 if (unlikely(ret)) {
8444 btrfs_abort_transaction(trans, ret);
8445 goto out_fail;
8446 }
8447 }
8448
8449 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8450 new_name, 0, old_idx);
8451 if (unlikely(ret)) {
8452 btrfs_abort_transaction(trans, ret);
8453 goto out_fail;
8454 }
8455
8456 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8457 old_name, 0, new_idx);
8458 if (unlikely(ret)) {
8459 btrfs_abort_transaction(trans, ret);
8460 goto out_fail;
8461 }
8462
8463 if (old_inode->i_nlink == 1)
8464 BTRFS_I(old_inode)->dir_index = old_idx;
8465 if (new_inode->i_nlink == 1)
8466 BTRFS_I(new_inode)->dir_index = new_idx;
8467
8468 /*
8469 * Do the log updates for all inodes.
8470 *
8471 * If either entry is for a root we don't need to update the logs since
8472 * we've called btrfs_set_log_full_commit() before.
8473 */
8474 if (logs_pinned) {
8475 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8476 old_rename_ctx.index, new_dentry->d_parent);
8477 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
8478 new_rename_ctx.index, old_dentry->d_parent);
8479 }
8480
8481 out_fail:
8482 if (logs_pinned) {
8483 btrfs_end_log_trans(root);
8484 btrfs_end_log_trans(dest);
8485 }
8486 ret2 = btrfs_end_transaction(trans);
8487 ret = ret ? ret : ret2;
8488 out_notrans:
8489 if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
8490 old_ino == BTRFS_FIRST_FREE_OBJECTID)
8491 up_read(&fs_info->subvol_sem);
8492
8493 fscrypt_free_filename(&new_fname);
8494 fscrypt_free_filename(&old_fname);
8495 return ret;
8496 }
8497
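/*
 * Allocate an in-memory inode for a whiteout: a char device with device
 * number WHITEOUT_DEV, used by RENAME_WHITEOUT. Returns NULL on allocation
 * failure.
 */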
static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
					struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode_init_owner(idmap, inode, dir,
				 S_IFCHR | WHITEOUT_MODE);
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
	}
	return inode;
}

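/*
 * Core of a plain rename (and RENAME_WHITEOUT): remove the old directory
 * entry, add the new one and, if requested, create a whiteout inode in place
 * of the old entry.
 */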
static int btrfs_rename(struct mnt_idmap *idmap,
			struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
	struct btrfs_new_inode_args whiteout_args = {
		.dir = old_dir,
		.dentry = old_dentry,
	};
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = d_inode(new_dentry);
	struct inode *old_inode = d_inode(old_dentry);
	struct btrfs_rename_ctx rename_ctx;
	u64 index = 0;
	int ret;
	int ret2;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	struct fscrypt_name old_fname, new_fname;
	bool logs_pinned = false;

	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* We only allow renaming a subvolume link between subvolumes. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	/* Check for collisions, even if the name isn't there. */
	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
	if (ret) {
		if (ret == -EEXIST) {
			/* We shouldn't get -EEXIST without a new_inode. */
			if (WARN_ON(!new_inode))
				goto out_fscrypt_names;
		} else {
			/* Maybe -EOVERFLOW. */
			goto out_fscrypt_names;
		}
	}
	ret = 0;

	/*
	 * We're using rename to replace one file with another. Start IO on it
	 * now so we don't add too much work to the end of the transaction.
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
		filemap_flush(old_inode->i_mapping);

	if (flags & RENAME_WHITEOUT) {
		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
		if (!whiteout_args.inode) {
			ret = -ENOMEM;
			goto out_fscrypt_names;
		}
		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
		if (ret)
			goto out_whiteout_inode;
	} else {
		/* 1 to update the old parent inode. */
		trans_num_items = 1;
	}

	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* Close the race window with snapshot create/destroy ioctl. */
		down_read(&fs_info->subvol_sem);
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	/*
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 */
	trans_num_items += 4;
	/* 1 to update the new parent inode if it's not the same as the old parent. */
	if (new_dir != old_dir)
		trans_num_items++;
	if (new_inode) {
		/*
		 * 1 to update inode
		 * 1 to remove inode ref
		 * 1 to remove dir item
		 * 1 to remove dir index
		 * 1 to possibly add orphan item
		 */
		trans_num_items += 5;
	}
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* Force a full log commit if a subvolume is involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
					     index);
		if (ret)
			goto out_fail;
	}

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * If we are renaming in the same directory (and it's not a
		 * root entry) pin the log to prevent any concurrent task from
		 * logging the directory after we removed the old entry and
		 * before we add the new entry, otherwise that task can sync
		 * a log without any entry for the inode we are renaming and
		 * therefore replaying that log, if a power failure happens
		 * after syncing the log, would result in deleting the inode.
		 *
		 * If the rename affects two different directories, we want to
		 * make sure that there's no log commit that contains updates
		 * for only one of the directories but not for the other.
		 *
		 * If we are renaming an entry for a root, we don't care about
		 * log updates since we called btrfs_set_log_full_commit().
		 */
		btrfs_pin_log_trans(root);
		btrfs_pin_log_trans(dest);
		logs_pinned = true;
	}

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	} else {
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(d_inode(old_dentry)),
					   &old_fname.disk_name, &rename_ctx);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
		ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	if (new_inode) {
		inode_inc_iversion(new_inode);
		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
			if (unlikely(ret)) {
				btrfs_abort_transaction(trans, ret);
				goto out_fail;
			}
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
						 BTRFS_I(d_inode(new_dentry)),
						 &new_fname.disk_name);
			if (unlikely(ret)) {
				btrfs_abort_transaction(trans, ret);
				goto out_fail;
			}
		}
		if (new_inode->i_nlink == 0) {
			ret = btrfs_orphan_add(trans,
					       BTRFS_I(d_inode(new_dentry)));
			if (unlikely(ret)) {
				btrfs_abort_transaction(trans, ret);
				goto out_fail;
			}
		}
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     &new_fname.disk_name, 0, index);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = index;

	if (logs_pinned)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   rename_ctx.index, new_dentry->d_parent);

	if (flags & RENAME_WHITEOUT) {
		ret = btrfs_create_new_inode(trans, &whiteout_args);
		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		} else {
			unlock_new_inode(whiteout_args.inode);
			iput(whiteout_args.inode);
			whiteout_args.inode = NULL;
		}
	}
out_fail:
	if (logs_pinned) {
		btrfs_end_log_trans(root);
		btrfs_end_log_trans(dest);
	}
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);
	if (flags & RENAME_WHITEOUT)
		btrfs_new_inode_args_destroy(&whiteout_args);
out_whiteout_inode:
	if (flags & RENAME_WHITEOUT)
		iput(whiteout_args.inode);
out_fscrypt_names:
	fscrypt_free_filename(&old_fname);
	fscrypt_free_filename(&new_fname);
	return ret;
}

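/*
 * Entry point for the rename flavors: dispatch to btrfs_rename_exchange()
 * for RENAME_EXCHANGE, otherwise to btrfs_rename().
 */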
static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
			 struct dentry *old_dentry, struct inode *new_dir,
			 struct dentry *new_dentry, unsigned int flags)
{
	int ret;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
					    new_dentry);
	else
		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
				   new_dentry, flags);

	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);

	return ret;
}

struct btrfs_delalloc_work {
	struct inode *inode;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};

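/* Worker callback: flush one inode's delalloc pages and signal completion. */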
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	iput(inode);
	complete(&delalloc_work->completion);
}

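/* Allocate and initialize a delalloc flushing work item for the given inode. */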
static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc_obj(*work, GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);

	return work;
}

/*
 * Some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int start_delalloc_inodes(struct btrfs_root *root, long *nr_to_write,
				 bool snapshot, bool in_reclaim_context)
{
	struct btrfs_delalloc_work *work, *next;
	LIST_HEAD(works);
	LIST_HEAD(splice);
	int ret = 0;

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		struct btrfs_inode *inode;
		struct inode *tmp_inode;

		inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes);

		list_move_tail(&inode->delalloc_inodes, &root->delalloc_inodes);

		if (in_reclaim_context &&
		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags))
			continue;

		tmp_inode = igrab(&inode->vfs_inode);
		if (!tmp_inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		if (snapshot)
			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &inode->runtime_flags);
		if (nr_to_write == NULL) {
			work = btrfs_alloc_delalloc_work(tmp_inode);
			if (!work) {
				iput(tmp_inode);
				ret = -ENOMEM;
				goto out;
			}
			list_add_tail(&work->list, &works);
			btrfs_queue_work(root->fs_info->flush_workers,
					 &work->work);
		} else {
			ret = filemap_flush_nr(tmp_inode->i_mapping,
					       nr_to_write);
			btrfs_add_delayed_iput(inode);

			if (ret || *nr_to_write <= 0)
				goto out;
		}
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		wait_for_completion(&work->completion);
		kfree(work);
	}

	if (!list_empty(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}

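/*
 * Flush all delalloc of a root before taking a snapshot. Fails with -EROFS
 * if the filesystem is in an error state.
 */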
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;
	return start_delalloc_inodes(root, NULL, true, in_reclaim_context);
}

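/*
 * Flush delalloc on all roots with pending delalloc, writing back at most
 * @nr pages (LONG_MAX means no limit).
 */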
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context)
{
	long *nr_to_write = nr == LONG_MAX ? NULL : &nr;
	struct btrfs_root *root;
	LIST_HEAD(splice);
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = start_delalloc_inodes(root, nr_to_write, false,
					    in_reclaim_context);
		btrfs_put_root(root);
		if (ret < 0 || nr <= 0)
			goto out;
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
out:
	if (!list_empty(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}

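/*
 * Create a symlink. The target string is stored as an uncompressed inline
 * file extent, which is why its length is limited to less than a block.
 */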
static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
			 struct dentry *dentry, const char *symname)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
	};
	unsigned int trans_num_items;
	int ret;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	/*
	 * Symlinks utilize uncompressed inline extent data, which should not
	 * reach block size.
	 */
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    name_len >= fs_info->sectorsize)
		return -ENAMETOOLONG;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
	inode->i_op = &btrfs_symlink_inode_operations;
	inode_nohighmem(inode);
	inode->i_mapping->a_ops = &btrfs_aops;
	btrfs_i_size_write(BTRFS_I(inode), name_len);
	inode_set_bytes(inode, name_len);

	new_inode_args.inode = inode;
	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (ret)
		goto out_inode;
	/* 1 additional item for the inline extent */
	trans_num_items++;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	ret = btrfs_create_new_inode(trans, &new_inode_args);
	if (ret)
		goto out;

	path = btrfs_alloc_path();
	if (unlikely(!path)) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
	if (unlikely(ret)) {
		btrfs_abort_transaction(trans, ret);
		btrfs_free_path(path);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_free_path(path);

	d_instantiate_new(dentry, inode);
	ret = 0;
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (ret)
		iput(inode);
	return ret;
}

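/*
 * Insert a prealloc file extent item for the reserved extent @ins at
 * @file_offset, either using the given transaction or, if @trans_in is NULL,
 * the one obtained via btrfs_replace_file_extents().
 */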
static struct btrfs_trans_handle *insert_prealloc_file_extent(
				struct btrfs_trans_handle *trans_in,
				struct btrfs_inode *inode,
				struct btrfs_key *ins,
				u64 file_offset)
{
	struct btrfs_file_extent_item stack_fi;
	struct btrfs_replace_extent_info extent_info;
	struct btrfs_trans_handle *trans = trans_in;
	struct btrfs_path *path;
	u64 start = ins->objectid;
	u64 len = ins->offset;
	u64 qgroup_released = 0;
	int ret;

	memset(&stack_fi, 0, sizeof(stack_fi));

	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
	/* Encryption and other encoding is reserved and all 0. */

	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
	if (ret < 0)
		return ERR_PTR(ret);

	if (trans) {
		ret = insert_reserved_file_extent(trans, inode,
						  file_offset, &stack_fi,
						  true, qgroup_released);
		if (ret)
			goto free_qgroup;
		return trans;
	}

	extent_info.disk_offset = start;
	extent_info.disk_len = len;
	extent_info.data_offset = 0;
	extent_info.data_len = len;
	extent_info.file_offset = file_offset;
	extent_info.extent_buf = (char *)&stack_fi;
	extent_info.is_new_extent = true;
	extent_info.update_times = true;
	extent_info.qgroup_reserved = qgroup_released;
	extent_info.insertions = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto free_qgroup;
	}

	ret = btrfs_replace_file_extents(inode, path, file_offset,
					 file_offset + len - 1, &extent_info,
					 &trans);
	btrfs_free_path(path);
	if (ret)
		goto free_qgroup;
	return trans;

free_qgroup:
	/*
	 * We have released the qgroup data range at the beginning of the
	 * function, and normally the qgroup_released bytes will be freed when
	 * committing the transaction. But if we error out early, we have to
	 * free what we have released, or we leak the qgroup data reservation.
	 */
	btrfs_qgroup_free_refroot(inode->root->fs_info,
				  btrfs_root_id(inode->root), qgroup_released,
				  BTRFS_QGROUP_RSV_DATA);
	return ERR_PTR(ret);
}

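/*
 * Allocate and insert preallocated extents for the range
 * [@start, @start + @num_bytes), in chunks of at most 256M, updating i_size
 * as we go when FALLOC_FL_KEEP_SIZE is not set.
 */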
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 clear_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;
	u64 end = start + num_bytes - 1;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		cur_bytes = min_t(u64, num_bytes, SZ_256M);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks, let's make its job easier by only searching for
		 * those sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
					   min_size, 0, *alloc_hint, &ins, true, false);
		if (ret)
			break;

		/*
		 * We've reserved this space, and thus converted it from
		 * ->bytes_may_use to ->bytes_reserved. Any error that happens
		 * from here on out we will only need to clear our reservation
		 * for the remaining unreserved area, so advance our
		 * clear_offset by our extent size.
		 */
		clear_offset += ins.offset;

		last_alloc = ins.offset;
		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
						    &ins, cur_offset);
		/*
		 * Now that we inserted the prealloc extent we can finally
		 * decrement the number of reservations in the block group.
		 * If we did it before, we could race with relocation and have
		 * relocation miss the reserved extent, making it fail later.
		 */
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_free_reserved_extent(fs_info, ins.objectid,
						   ins.offset, false);
			break;
		}

		em = btrfs_alloc_extent_map();
		if (!em) {
			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
						    cur_offset + ins.offset - 1, false);
			btrfs_set_inode_full_sync(BTRFS_I(inode));
			goto next;
		}

		em->start = cur_offset;
		em->len = ins.offset;
		em->disk_bytenr = ins.objectid;
		em->offset = 0;
		em->disk_num_bytes = ins.offset;
		em->ram_bytes = ins.offset;
		em->flags |= EXTENT_FLAG_PREALLOC;
		em->generation = trans->transid;

		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
		btrfs_free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode_set_ctime_current(inode);
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		}

		ret = btrfs_update_inode(trans, BTRFS_I(inode));

		if (unlikely(ret)) {
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		if (own_trans) {
			btrfs_end_transaction(trans);
			trans = NULL;
		}
	}
	if (clear_offset < end)
		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
					       end - clear_offset + 1);
	return ret;
}

int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}

/*
 * NOTE: in case you are adding a MAY_EXEC check for directories:
 * we are marking them with IOP_FASTPERM_MAY_EXEC, allowing path lookup to
 * elide calls here.
 */
static int btrfs_permission(struct mnt_idmap *idmap,
			    struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(idmap, inode, mask);
}

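/*
 * Create an unlinked temporary file (O_TMPFILE). The new inode starts out as
 * an orphan so that it gets cleaned up if we crash before it is linked.
 */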
static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
			 struct file *file, umode_t mode)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = file->f_path.dentry,
		.orphan = true,
	};
	unsigned int trans_num_items;
	int ret;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	new_inode_args.inode = inode;
	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (ret)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	ret = btrfs_create_new_inode(trans, &new_inode_args);

	/*
	 * We set the number of links to 0 in btrfs_create_new_inode(), and
	 * here we set it to 1 because d_tmpfile() will issue a warning if the
	 * count is 0, through:
	 *
	 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);

	if (!ret) {
		d_tmpfile(file, inode);
		unlock_new_inode(inode);
		mark_inode_dirty(inode);
	}

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (ret)
		iput(inode);
	return finish_open_simple(file, ret);
}

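/*
 * Translate an on-disk compression type to the compression value used by the
 * encoded I/O interface, or a negative errno if the combination of type and
 * sector size is not supported.
 */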
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
					     int compress_type)
{
	switch (compress_type) {
	case BTRFS_COMPRESS_NONE:
		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
	case BTRFS_COMPRESS_ZLIB:
		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
	case BTRFS_COMPRESS_LZO:
		/*
		 * The LZO format depends on the sector size. 64K is the maximum
		 * sector size that we support.
		 */
		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
			return -EINVAL;
		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
		       (fs_info->sectorsize_bits - 12);
	case BTRFS_COMPRESS_ZSTD:
		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
	default:
		return -EUCLEAN;
	}
}

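/*
 * Encoded read of an inline extent: copy the (possibly compressed) inline
 * data straight out of the leaf into the iterator.
 */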
static ssize_t btrfs_encoded_read_inline(
				struct kiocb *iocb,
				struct iov_iter *iter, u64 start,
				u64 lockend,
				struct extent_state **cached_state,
				u64 extent_start, size_t count,
				struct btrfs_ioctl_encoded_io_args *encoded,
				bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	BTRFS_PATH_AUTO_FREE(path);
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *item;
	u64 ram_bytes;
	unsigned long ptr;
	void *tmp;
	ssize_t ret;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->nowait = nowait;

	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
				       extent_start, 0);
	if (ret) {
		if (unlikely(ret > 0)) {
			/* The extent item disappeared? */
			return -EIO;
		}
		return ret;
	}
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);

	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
	ptr = btrfs_file_extent_inline_start(item);

	encoded->len = min_t(u64, extent_start + ram_bytes,
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	ret = btrfs_encoded_io_compression_from_extent(fs_info,
				btrfs_file_extent_compression(leaf, item));
	if (ret < 0)
		return ret;
	encoded->compression = ret;
	if (encoded->compression) {
		size_t inline_size;

		inline_size = btrfs_file_extent_inline_item_len(leaf,
								path->slots[0]);
		if (inline_size > count)
			return -ENOBUFS;

		count = inline_size;
		encoded->unencoded_len = ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - extent_start;
	} else {
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
		ptr += iocb->ki_pos - extent_start;
	}

	tmp = kmalloc(count, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	read_extent_buffer(leaf, tmp, ptr, count);
	btrfs_release_path(path);
	btrfs_unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	ret = copy_to_iter(tmp, count, iter);
	if (ret != count)
		ret = -EFAULT;
	kfree(tmp);

	return ret;
}

struct btrfs_encoded_read_private {
	struct completion *sync_reads;
	void *uring_ctx;
	refcount_t pending_refs;
	blk_status_t status;
};

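/*
 * Completion handler for encoded read bios. The task that drops the last
 * pending reference either completes the synchronous waiter or finishes the
 * io_uring request.
 */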
static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
{
	struct btrfs_encoded_read_private *priv = bbio->private;

	if (bbio->bio.bi_status) {
		/*
		 * The memory barrier implied by the refcount_dec_and_test()
		 * here pairs with the memory barrier implied by the
		 * refcount_dec_and_test() in
		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
		 * write is observed before the load of status in
		 * btrfs_encoded_read_regular_fill_pages().
		 */
		WRITE_ONCE(priv->status, bbio->bio.bi_status);
	}
	if (refcount_dec_and_test(&priv->pending_refs)) {
		int err = blk_status_to_errno(READ_ONCE(priv->status));

		if (priv->uring_ctx) {
			btrfs_uring_read_extent_endio(priv->uring_ctx, err);
			kfree(priv);
		} else {
			complete(priv->sync_reads);
		}
	}
	bio_put(&bbio->bio);
}

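/*
 * Read @disk_io_size bytes starting at @disk_bytenr into @pages. Synchronous
 * callers (no @uring_ctx) wait for completion here; io_uring callers get
 * -EIOCBQUEUED and complete asynchronously via the endio handler.
 */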
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
					  u64 disk_bytenr, u64 disk_io_size,
					  struct page **pages, void *uring_ctx)
{
	struct btrfs_encoded_read_private *priv, sync_priv;
	struct completion sync_reads;
	unsigned long i = 0;
	struct btrfs_bio *bbio;
	int ret;

	/*
	 * Fast path for synchronous reads, which complete within this call:
	 * keep the private data on the stack. io_uring reads need it to live
	 * longer than this call, so allocate it instead.
	 */
	if (uring_ctx) {
		priv = kmalloc_obj(struct btrfs_encoded_read_private, GFP_NOFS);
		if (!priv)
			return -ENOMEM;
	} else {
		priv = &sync_priv;
		init_completion(&sync_reads);
		priv->sync_reads = &sync_reads;
	}

	refcount_set(&priv->pending_refs, 1);
	priv->status = 0;
	priv->uring_ctx = uring_ctx;

	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
			       btrfs_encoded_read_endio, priv);
	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;

	do {
		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);

		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
			refcount_inc(&priv->pending_refs);
			btrfs_submit_bbio(bbio, 0);

			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, 0,
					       btrfs_encoded_read_endio, priv);
			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
			continue;
		}

		i++;
		disk_bytenr += bytes;
		disk_io_size -= bytes;
	} while (disk_io_size);

	refcount_inc(&priv->pending_refs);
	btrfs_submit_bbio(bbio, 0);

	if (uring_ctx) {
		if (refcount_dec_and_test(&priv->pending_refs)) {
			ret = blk_status_to_errno(READ_ONCE(priv->status));
			btrfs_uring_read_extent_endio(uring_ctx, ret);
			kfree(priv);
			return ret;
		}

		return -EIOCBQUEUED;
	} else {
		if (!refcount_dec_and_test(&priv->pending_refs))
			wait_for_completion_io(&sync_reads);
		/* See btrfs_encoded_read_endio() for ordering. */
		return blk_status_to_errno(READ_ONCE(priv->status));
	}
}

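/*
 * Encoded read of a regular extent: read the raw (possibly compressed) data
 * into temporary pages and copy the requested range to the iterator.
 */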
ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
				   u64 start, u64 lockend,
				   struct extent_state **cached_state,
				   u64 disk_bytenr, u64 disk_io_size,
				   size_t count, bool compressed, bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct page **pages;
	unsigned long nr_pages, i;
	u64 cur;
	size_t page_offset;
	ssize_t ret;

	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
	pages = kzalloc_objs(struct page *, nr_pages, GFP_NOFS);
	if (!pages)
		return -ENOMEM;
	ret = btrfs_alloc_page_array(nr_pages, pages, false);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
						    disk_io_size, pages, NULL);
	if (ret)
		goto out;

	btrfs_unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	if (compressed) {
		i = 0;
		page_offset = 0;
	} else {
		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
	}
	cur = 0;
	while (cur < count) {
		size_t bytes = min_t(size_t, count - cur,
				     PAGE_SIZE - page_offset);

		if (copy_page_to_iter(pages[i], page_offset, bytes,
				      iter) != bytes) {
			ret = -EFAULT;
			goto out;
		}
		i++;
		cur += bytes;
		page_offset = 0;
	}
	ret = count;
out:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	return ret;
}

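/*
 * Common setup for an encoded read: lock the range, wait out ordered
 * extents and then decide whether the extent is inline, a hole/prealloc
 * (zero-fill), or a regular extent that still needs to be read
 * (-EIOCBQUEUED, with the inode and extent range left locked).
 */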
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
			   struct btrfs_ioctl_encoded_io_args *encoded,
			   struct extent_state **cached_state,
			   u64 *disk_bytenr, u64 *disk_io_size)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	ssize_t ret;
	size_t count = iov_iter_count(iter);
	u64 start, lockend;
	struct extent_map *em;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
	bool unlocked = false;

	file_accessed(iocb->ki_filp);

	ret = btrfs_inode_lock(inode,
			       BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
	if (ret)
		return ret;

	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		return 0;
	}
	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
	/*
	 * We don't know how long the extent containing iocb->ki_pos is, but if
	 * it's compressed we know that it won't be longer than this.
	 */
	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;

	if (nowait) {
		struct btrfs_ordered_extent *ordered;

		if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
						  start, lockend)) {
			ret = -EAGAIN;
			goto out_unlock_inode;
		}

		if (!btrfs_try_lock_extent(io_tree, start, lockend, cached_state)) {
			ret = -EAGAIN;
			goto out_unlock_inode;
		}

		ordered = btrfs_lookup_ordered_range(inode, start,
						     lockend - start + 1);
		if (ordered) {
			btrfs_put_ordered_extent(ordered);
			btrfs_unlock_extent(io_tree, start, lockend, cached_state);
			ret = -EAGAIN;
			goto out_unlock_inode;
		}
	} else {
		for (;;) {
			struct btrfs_ordered_extent *ordered;

			ret = btrfs_wait_ordered_range(inode, start,
						       lockend - start + 1);
			if (ret)
				goto out_unlock_inode;

			btrfs_lock_extent(io_tree, start, lockend, cached_state);
			ordered = btrfs_lookup_ordered_range(inode, start,
							     lockend - start + 1);
			if (!ordered)
				break;
			btrfs_put_ordered_extent(ordered);
			btrfs_unlock_extent(io_tree, start, lockend, cached_state);
			cond_resched();
		}
	}

	em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock_extent;
	}

	if (em->disk_bytenr == EXTENT_MAP_INLINE) {
		u64 extent_start = em->start;

		/*
		 * For inline extents we get everything we need out of the
		 * extent item.
		 */
		btrfs_free_extent_map(em);
		em = NULL;
		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
						cached_state, extent_start,
						count, encoded, &unlocked);
		goto out_unlock_extent;
	}

	/*
	 * We only want to return up to EOF even if the extent extends beyond
	 * that.
	 */
	encoded->len = min_t(u64, btrfs_extent_map_end(em),
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	if (em->disk_bytenr == EXTENT_MAP_HOLE ||
	    (em->flags & EXTENT_FLAG_PREALLOC)) {
		*disk_bytenr = EXTENT_MAP_HOLE;
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
	} else if (btrfs_extent_map_is_compressed(em)) {
		*disk_bytenr = em->disk_bytenr;
		/*
		 * Bail if the buffer isn't large enough to return the whole
		 * compressed extent.
		 */
		if (em->disk_num_bytes > count) {
			ret = -ENOBUFS;
			goto out_em;
		}
		*disk_io_size = em->disk_num_bytes;
		count = em->disk_num_bytes;
		encoded->unencoded_len = em->ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
		ret = btrfs_encoded_io_compression_from_extent(fs_info,
					btrfs_extent_map_compression(em));
		if (ret < 0)
			goto out_em;
		encoded->compression = ret;
	} else {
		*disk_bytenr = btrfs_extent_map_block_start(em) + (start - em->start);
		if (encoded->len > count)
			encoded->len = count;
		/*
		 * Don't read beyond what we locked. This also limits the page
		 * allocations that we'll do.
		 */
		*disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
		count = start + *disk_io_size - iocb->ki_pos;
		encoded->len = count;
		encoded->unencoded_len = count;
		*disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
	}
	btrfs_free_extent_map(em);
	em = NULL;

	if (*disk_bytenr == EXTENT_MAP_HOLE) {
		btrfs_unlock_extent(io_tree, start, lockend, cached_state);
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		unlocked = true;
		ret = iov_iter_zero(count, iter);
		if (ret != count)
			ret = -EFAULT;
	} else {
		ret = -EIOCBQUEUED;
		goto out_unlock_extent;
	}

out_em:
	btrfs_free_extent_map(em);
out_unlock_extent:
	/* Leave the inode and extent range locked if we need to do a read. */
	if (!unlocked && ret != -EIOCBQUEUED)
		btrfs_unlock_extent(io_tree, start, lockend, cached_state);
out_unlock_inode:
	if (!unlocked && ret != -EIOCBQUEUED)
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	return ret;
}

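/*
 * Write pre-compressed data as a single extent: validate the encoded
 * parameters against the sector size, copy the compressed bytes into folios,
 * reserve data and metadata space, and submit a compressed write bio for
 * the range starting at iocb->ki_pos.
 */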
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			       const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_changeset *data_reserved = NULL;
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	struct compressed_bio *cb = NULL;
	int compression;
	size_t orig_count;
	const u32 min_folio_size = btrfs_min_folio_size(fs_info);
	u64 start, end;
	u64 num_bytes, ram_bytes, disk_num_bytes;
	struct btrfs_key ins;
	bool extent_reserved = false;
	struct extent_map *em;
	ssize_t ret;

	switch (encoded->compression) {
	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
		compression = BTRFS_COMPRESS_ZLIB;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
		compression = BTRFS_COMPRESS_ZSTD;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
		/* The sector size must match for LZO. */
		if (encoded->compression -
		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
		    fs_info->sectorsize_bits)
			return -EINVAL;
		compression = BTRFS_COMPRESS_LZO;
		break;
	default:
		return -EINVAL;
	}
	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
		return -EINVAL;

	/*
	 * Compressed extents should always have checksums, so error out if we
	 * have a NOCOW file or the inode was created while mounted with
	 * NODATASUM.
	 */
	if (inode->flags & BTRFS_INODE_NODATASUM)
		return -EINVAL;

	orig_count = iov_iter_count(from);

	/* The extent size must be sane. */
	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
		return -EINVAL;

	/*
	 * The compressed data must be smaller than the decompressed data.
	 *
	 * It's of course possible for data to compress to larger or the same
	 * size, but the buffered I/O path falls back to no compression for
	 * such data, and we don't want to break any assumptions by creating
	 * these extents.
	 *
	 * Note that this is less strict than the current check that the
	 * compressed data must be at least one sector smaller than the
	 * decompressed data. We only want to enforce the weaker requirement
	 * from old kernels that it is at least one byte smaller.
	 */
	if (orig_count >= encoded->unencoded_len)
		return -EINVAL;

	/* The extent must start on a sector boundary. */
	start = iocb->ki_pos;
	if (!IS_ALIGNED(start, fs_info->sectorsize))
		return -EINVAL;

	/*
	 * The extent must end on a sector boundary. However, we allow a write
	 * which ends at or extends i_size to have an unaligned length; we
	 * round up the extent size and set i_size to the unaligned end.
	 */
	if (start + encoded->len < inode->vfs_inode.i_size &&
	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
		return -EINVAL;

	/* Finally, the offset in the unencoded data must be sector-aligned. */
	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
		return -EINVAL;

	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
	end = start + num_bytes - 1;

	/*
	 * If the extent cannot be inline, the compressed data on disk must be
	 * sector-aligned. For convenience, we extend it with zeroes if it
	 * isn't.
	 */
	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);

	cb = btrfs_alloc_compressed_write(inode, start, num_bytes);
	for (int i = 0; i * min_folio_size < disk_num_bytes; i++) {
		struct folio *folio;
		size_t bytes = min(min_folio_size, iov_iter_count(from));
		char *kaddr;

		folio = btrfs_alloc_compr_folio(fs_info);
		if (!folio) {
			ret = -ENOMEM;
			goto out_cb;
		}
		kaddr = kmap_local_folio(folio, 0);
		ret = copy_from_iter(kaddr, bytes, from);
		kunmap_local(kaddr);
		if (ret != bytes) {
			folio_put(folio);
			ret = -EFAULT;
			goto out_cb;
		}
		if (bytes < min_folio_size)
			folio_zero_range(folio, bytes, min_folio_size - bytes);
		ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0);
		if (unlikely(!ret)) {
			folio_put(folio);
			ret = -EINVAL;
			goto out_cb;
		}
	}
	ASSERT(cb->bbio.bio.bi_iter.bi_size == disk_num_bytes);

	for (;;) {
		ret = btrfs_wait_ordered_range(inode, start, num_bytes);
		if (ret)
			goto out_cb;
		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
		if (ret)
			goto out_cb;
		btrfs_lock_extent(io_tree, start, end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
		if (!ordered &&
		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
			break;
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		btrfs_unlock_extent(io_tree, start, end, &cached_state);
		cond_resched();
	}

	/*
	 * We don't use the higher-level delalloc space functions because our
	 * num_bytes and disk_num_bytes are different.
	 */
	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
	if (ret)
		goto out_unlock;
	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
	if (ret)
		goto out_free_data_space;
	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
					      false);
	if (ret)
		goto out_qgroup_free_data;

	/* Try an inline extent first. */
	if (encoded->unencoded_len == encoded->len &&
	    encoded->unencoded_offset == 0 &&
	    can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
		ret = __cow_file_range_inline(inode, encoded->len,
					      orig_count, compression,
					      bio_first_folio_all(&cb->bbio.bio),
					      true);
		if (ret <= 0) {
			if (ret == 0)
				ret = orig_count;
			goto out_delalloc_release;
		}
	}

	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
				   disk_num_bytes, 0, 0, &ins, true, true);
	if (ret)
		goto out_delalloc_release;
	extent_reserved = true;

	file_extent.disk_bytenr = ins.objectid;
	file_extent.disk_num_bytes = ins.offset;
	file_extent.num_bytes = num_bytes;
	file_extent.ram_bytes = ram_bytes;
	file_extent.offset = encoded->unencoded_offset;
	file_extent.compression = compression;
	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserved;
	}
	btrfs_free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
					     (1U << BTRFS_ORDERED_ENCODED) |
					     (1U << BTRFS_ORDERED_COMPRESSED));
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserved;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	if (start + encoded->len > inode->vfs_inode.i_size)
		i_size_write(&inode->vfs_inode, start + encoded->len);

	btrfs_unlock_extent(io_tree, start, end, &cached_state);

	btrfs_delalloc_release_extents(inode, num_bytes);

	btrfs_submit_compressed_write(ordered, cb);
	ret = orig_count;
	goto out;

out_free_reserved:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true);
out_delalloc_release:
	btrfs_delalloc_release_extents(inode, num_bytes);
	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
	if (ret < 0)
		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
out_free_data_space:
	/*
	 * If btrfs_reserve_extent() succeeded, then we already decremented
	 * bytes_may_use.
	 */
	if (!extent_reserved)
		btrfs_free_reserved_data_space_noquota(inode, disk_num_bytes);
out_unlock:
	btrfs_unlock_extent(io_tree, start, end, &cached_state);
out_cb:
	if (cb)
		cleanup_compressed_bio(cb);
out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
	return ret;
}

10081 #ifdef CONFIG_SWAP
10082 /*
10083 * Add an entry indicating a block group or device which is pinned by a
10084 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
10085 * negative errno on failure.
10086 */
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
				  bool is_block_group)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp, *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	sp = kmalloc_obj(*sp, GFP_NOFS);
	if (!sp)
		return -ENOMEM;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->is_block_group = is_block_group;
	sp->bg_extent_count = 1;

	spin_lock(&fs_info->swapfile_pins_lock);
	p = &fs_info->swapfile_pins.rb_node;
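	/*
	 * Entries are ordered by (ptr, inode), so the same device or block
	 * group can be pinned by more than one swapfile.
	 */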
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
			p = &(*p)->rb_left;
		} else if (sp->ptr > entry->ptr ||
			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
			p = &(*p)->rb_right;
		} else {
			if (is_block_group)
				entry->bg_extent_count++;
			spin_unlock(&fs_info->swapfile_pins_lock);
			kfree(sp);
			return 1;
		}
	}
	rb_link_node(&sp->node, parent, p);
	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
	spin_unlock(&fs_info->swapfile_pins_lock);
	return 0;
}

/* Free all of the entries pinned by this swapfile. */
static void btrfs_free_swapfile_pins(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node, *next;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = rb_first(&fs_info->swapfile_pins);
	while (node) {
		next = rb_next(node);
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (sp->inode == inode) {
			rb_erase(&sp->node, &fs_info->swapfile_pins);
			if (sp->is_block_group) {
				btrfs_dec_block_group_swap_extents(sp->ptr,
							sp->bg_extent_count);
				btrfs_put_block_group(sp->ptr);
			}
			kfree(sp);
		}
		node = next;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
}

struct btrfs_swap_info {
	u64 start;
	u64 block_start;
	u64 block_len;
	u64 lowest_ppage;
	u64 highest_ppage;
	unsigned long nr_pages;
	int nr_extents;
};

static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	unsigned long max_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	/*
	 * Our swapfile may have had its size extended after the swap header was
	 * written. In that case activating the swapfile should not go beyond
	 * the max size set in the swap header.
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

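	/*
	 * Page 0 of the swapfile holds the swap header written by mkswap and
	 * is never used for swap data, so exclude it from the reported span.
	 */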
	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}
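
/*
 * Illustrative sketch only (never compiled): a worked example of the page
 * rounding done by btrfs_add_swap_extent() above, assuming 4 KiB pages. The
 * values are made up; partial pages at either end of the physical range are
 * dropped because swap I/O is done in whole pages.
 */
#if 0
static void example_swap_extent_rounding(void)
{
	const u64 block_start = 6144;	/* physical start, not page aligned */
	const u64 block_len = 16384;	/* four pages worth of bytes */

	/* PAGE_ALIGN(6144) = 8192, so the first whole page is ppage 2. */
	u64 first_ppage = PAGE_ALIGN(block_start) >> PAGE_SHIFT;
	/* PAGE_ALIGN_DOWN(22528) = 20480, so the range ends at ppage 5. */
	u64 next_ppage = PAGE_ALIGN_DOWN(block_start + block_len) >> PAGE_SHIFT;

	/* Only 3 of the 4 pages are usable: ppages 2, 3 and 4. */
	u64 nr_pages = next_ppage - first_ppage;
}
#endif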

static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct btrfs_chunk_map *map = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
	struct btrfs_path *path = NULL;
	int ret = 0;
	u64 isize;
	u64 prev_extent_end = 0;

	/*
	 * Acquire the inode's mmap lock to prevent races with memory mapped
	 * writes, as they could happen after we flush delalloc below and before
	 * we lock the extent range further below. The inode itself was already
	 * locked higher up in the call chain.
	 */
	btrfs_assert_inode_locked(BTRFS_I(inode));
	down_write(&BTRFS_I(inode)->i_mmap_lock);

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
	if (ret)
		goto out_unlock_mmap;

	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		ret = -EINVAL;
		goto out_unlock_mmap;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		ret = -EINVAL;
		goto out_unlock_mmap;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		ret = -EINVAL;
		goto out_unlock_mmap;
	}

	path = btrfs_alloc_path();
	backref_ctx = btrfs_alloc_backref_share_check_ctx();
	if (!path || !backref_ctx) {
		ret = -ENOMEM;
		goto out_unlock_mmap;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
			   "cannot activate swapfile while exclusive operation is running");
		ret = -EBUSY;
		goto out_unlock_mmap;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * If snapshot creation already started before we bumped nr_swapfiles
	 * from 0 to 1 and completes before the first write into the swap file
	 * after it is activated, then that write would fall back to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because snapshot creation is in progress");
		ret = -EINVAL;
		goto out_unlock_mmap;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that the subvolume is marked for deletion but not yet
	 * removed. To prevent this race, we check the root status before
	 * activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
			   "cannot activate swapfile because subvolume %llu is being deleted",
			   btrfs_root_id(root));
		ret = -EPERM;
		goto out_unlock_mmap;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	btrfs_lock_extent(io_tree, 0, isize - 1, &cached_state);
	while (prev_extent_end < isize) {
		struct btrfs_key key;
		struct extent_buffer *leaf;
		struct btrfs_file_extent_item *ei;
		struct btrfs_block_group *bg;
		u64 logical_block_start;
		u64 physical_block_start;
		u64 extent_gen;
		u64 disk_bytenr;
		u64 len;

		key.objectid = btrfs_ino(BTRFS_I(inode));
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = prev_extent_end;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * If the key is not found, it means we have an implicit hole
		 * (the NO_HOLES feature is enabled).
		 */
		if (ret > 0) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}

		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}

		if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
		if (disk_bytenr == 0) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
		extent_gen = btrfs_file_extent_generation(leaf, ei);
		prev_extent_end = btrfs_file_extent_end(path);

		if (prev_extent_end > isize)
			len = isize - key.offset;
		else
			len = btrfs_file_extent_num_bytes(leaf, ei);

		backref_ctx->curr_leaf_bytenr = leaf->start;

		/*
		 * We don't need the path anymore, so release it to avoid
		 * deadlocks when calling btrfs_is_data_extent_shared(): when
		 * joining a transaction it can block waiting for the current
		 * one's commit, which in turn may be trying to lock the same
		 * leaf to flush delayed items, for example.
		 */
		btrfs_release_path(path);

		ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
						  extent_gen, backref_ctx);
		if (ret < 0) {
			goto out;
		} else if (ret > 0) {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(map)) {
			ret = PTR_ERR(map);
			goto out;
		}

		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = map->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != map->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (map->stripes[0].physical +
					(logical_block_start - map->start));
		btrfs_free_chunk_map(map);
		map = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
				   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
				   "block group for swapfile at %llu is read-only%s",
				   bg->start,
				   atomic_read(&fs_info->scrubs_running) ?
					       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = key.offset;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(map))
		btrfs_free_chunk_map(map);

	btrfs_unlock_extent(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

out_unlock_mmap:
	up_write(&BTRFS_I(inode)->i_mmap_lock);
	btrfs_free_backref_share_ctx(backref_ctx);
	btrfs_free_path(path);
	if (ret)
		return ret;

	if (device)
		sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
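	/* Page 0 holds the swap header, so one page is never usable for swap. */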
	sis->pages = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
#else
static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}
#endif

/*
 * Update the number of bytes used in the VFS inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode atomically, so that concurrent stat(2) calls always
 * get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
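
/*
 * For example, a clone that replaces a 1 MiB extent with a 512 KiB extent in
 * the same file range would call btrfs_update_inode_bytes(inode, SZ_512K,
 * SZ_1M): the subtraction and addition happen under the same spinlock, so
 * stat(2) never observes an intermediate value.
 */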

/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode: The target inode.
 * @start: Start offset of the file range, should be sector size aligned.
 * @end:   End offset (inclusive) of the file range, its value plus 1 should
 *         be sector size aligned.
 *
 * This should typically be used for cases where we have locked the inode's VFS
 * lock in exclusive mode, have also locked the inode's i_mmap_lock in exclusive
 * mode, have flushed all delalloc in the range, have waited for all ordered
 * extents in the range to complete, and finally have locked the file range in
 * the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), btrfs_root_id(root),
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}

/*
 * Find the first inode with a minimum number.
 *
 * @root:    The root to search.
 * @min_ino: The minimum inode number.
 *
 * Find the first inode in the @root with a number >= @min_ino and return it.
 * Returns NULL if no such inode is found.
 */
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
{
	struct btrfs_inode *inode;
	unsigned long from = min_ino;

	xa_lock(&root->inodes);
	while (true) {
		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
		if (!inode)
			break;
		if (igrab(&inode->vfs_inode))
			break;

		from = btrfs_ino(inode) + 1;
		cond_resched_lock(&root->inodes.xa_lock);
	}
	xa_unlock(&root->inodes);

	return inode;
}
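
/*
 * Illustrative sketch only (never compiled): how a caller might walk every
 * inode currently cached in a root with btrfs_find_first_inode(), dropping
 * the reference that the lookup took via igrab().
 */
#if 0
static void example_iterate_root_inodes(struct btrfs_root *root)
{
	u64 min_ino = 0;
	struct btrfs_inode *inode;

	while ((inode = btrfs_find_first_inode(root, min_ino))) {
		/* ... do something with the inode ... */
		min_ino = btrfs_ino(inode) + 1;
		iput(&inode->vfs_inode);
	}
}
#endif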

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr = btrfs_getattr,
	.lookup = btrfs_lookup,
	.create = btrfs_create,
	.unlink = btrfs_unlink,
	.link = btrfs_link,
	.mkdir = btrfs_mkdir,
	.rmdir = btrfs_rmdir,
	.rename = btrfs_rename2,
	.symlink = btrfs_symlink,
	.setattr = btrfs_setattr,
	.mknod = btrfs_mknod,
	.listxattr = btrfs_listxattr,
	.permission = btrfs_permission,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
	.tmpfile = btrfs_tmpfile,
	.fileattr_get = btrfs_fileattr_get,
	.fileattr_set = btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek = btrfs_dir_llseek,
	.read = generic_read_dir,
	.iterate_shared = btrfs_real_readdir,
	.open = btrfs_opendir,
	.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = btrfs_compat_ioctl,
#endif
	.release = btrfs_release_file,
	.fsync = btrfs_sync_file,
	.setlease = generic_setlease,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they will also change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio = btrfs_read_folio,
	.writepages = btrfs_writepages,
	.readahead = btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.launder_folio = btrfs_launder_folio,
	.release_folio = btrfs_release_folio,
	.migrate_folio = btrfs_migrate_folio,
	.dirty_folio = filemap_dirty_folio,
	.error_remove_folio = generic_error_remove_folio,
	.swap_activate = btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.listxattr = btrfs_listxattr,
	.permission = btrfs_permission,
	.fiemap = btrfs_fiemap,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
	.fileattr_get = btrfs_fileattr_get,
	.fileattr_set = btrfs_fileattr_set,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.listxattr = btrfs_listxattr,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link = page_get_link,
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.listxattr = btrfs_listxattr,
	.update_time = btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete = btrfs_dentry_delete,
};