// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <linux/unaligned.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "bio.h"
#include "compression.h"
#include "locking.h"
#include "props.h"
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "dir-item.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "ioctl.h"
#include "file.h"
#include "acl.h"
#include "relocation.h"
#include "verity.h"
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"
#include "fiemap.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};

/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};

/*
 * For the file_extent_tree, we want to hold the inode lock when we look up and
 * update the disk_i_size, but lockdep will complain because with our io_tree
 * we hold the tree lock and take the inode lock when setting delalloc. These
 * two things are unrelated, so make a class for the file_extent_tree so we
 * don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct folio *locked_folio, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);

static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM is not a critical error, just output a generic error
		 * without the filename.
		 */
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			   warn->logical, warn->mirror_num, root, inum, offset);
		return ret;
	}
	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			   warn->logical, warn->mirror_num, root, inum, offset,
			   fs_info->sectorsize, nlink,
			   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		   warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}

/*
 * Do extra user-friendly error output (e.g. look up all the affected files).
 *
 * If the backref lookup fails, this falls back to the old, plain error message.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(inode->root),
			btrfs_ino(inode), file_off, logical,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}

static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(root), btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(root), btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	}
}

/*
 * Lock inode i_rwsem based on arguments passed.
 *
 * ilock_flags can have the following bits set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if it fails on the first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}

/*
 * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
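
/*
 * Illustrative usage (editor's sketch, not from the original source): a
 * typical caller pairs the two helpers above with matching flags, e.g. a
 * non-blocking shared acquisition:
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret == -EAGAIN)
 *		return ret;	// contended, caller retries later
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 *
 * The same flag set must be passed to both calls so the unlock releases the
 * same kind of lock (shared vs. exclusive) that was taken.
 */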

/*
 * Clean up all submitted ordered extents in the specified range to handle
 * errors from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: when an error happens, the caller must not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	struct folio *folio;

	while (index <= end_index) {
		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
		index++;
		if (IS_ERR(folio))
			continue;

		/*
		 * Here we just clear all Ordered bits for every page in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
						offset, bytes);
		folio_put(folio);
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}

static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}

/*
 * This does all the hard work for inserting an inline extent into the btree.
 * The caller should have done a btrfs_drop_extents() so that no overlapping
 * inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct folio *compressed_folio,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	const u32 sectorsize = trans->fs_info->sectorsize;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	/*
	 * The decompressed size must still be no larger than a sector. Under
	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
	 * big deal and we can continue the insertion.
	 */
	ASSERT(size <= sectorsize);

	/*
	 * The compressed size also needs to be no larger than a sector.
	 * That's also why we only need one page as the parameter.
	 */
	if (compressed_folio)
		ASSERT(compressed_size <= sectorsize);
	else
		ASSERT(compressed_size == 0);

	if (compressed_size && compressed_folio)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		kaddr = kmap_local_folio(compressed_folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
		kunmap_local(kaddr);

		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		struct folio *folio;

		folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
		ASSERT(!IS_ERR(folio));
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_folio(folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		folio_put(folio);
	}
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity's
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
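
/*
 * Illustrative note (editor's sketch, not from the original source): after a
 * successful insert_inline_extent() the subvolume leaf holds an item keyed
 *
 *	(<ino> BTRFS_EXTENT_DATA_KEY 0)
 *
 * whose btrfs_file_extent_item has type BTRFS_FILE_EXTENT_INLINE and carries
 * the (possibly compressed) file data directly in the item body, starting at
 * btrfs_file_extent_inline_start(ei). No separate data extent exists on disk,
 * which is why callers free the reserved data space themselves (see the
 * btrfs_qgroup_free_data() call in __cow_file_range_inline() below).
 */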

static bool can_cow_file_range_inline(struct btrfs_inode *inode,
				      u64 offset, u64 size,
				      size_t compressed_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 data_len = (compressed_size ?: size);

	/* Inline extents must start at offset 0. */
	if (offset != 0)
		return false;

	/*
	 * Due to the page size limit, for subpage we can only trigger
	 * writeback for the dirty sectors of a page, which means data
	 * writeback ends up doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger an inline extent even if we didn't want
	 * to. So here we skip inline extent creation completely.
	 */
	if (fs_info->sectorsize != PAGE_SIZE)
		return false;

	/* Inline extents are limited to sectorsize. */
	if (size > fs_info->sectorsize)
		return false;

	/* We cannot exceed the maximum inline data size. */
	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return false;

	/* We cannot exceed the user specified max_inline size. */
	if (data_len > fs_info->max_inline)
		return false;

	/* Inline extents must be the entirety of the file. */
	if (size < i_size_read(&inode->vfs_inode))
		return false;

	return true;
}
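
/*
 * Worked example (editor's illustration, assuming default mkfs/mount
 * settings): with a 4K sectorsize, 4K PAGE_SIZE and the default max_inline of
 * 2048 bytes, a 1900-byte file written at offset 0 passes every check above
 * and may be inlined; a 3000-byte file fails the max_inline check, and a
 * 5000-byte file fails the sectorsize check, so both get regular data
 * extents instead.
 */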

/*
 * Conditionally insert an inline extent into the file. This does the checks
 * required to make sure the data is small enough to fit as an inline extent.
 *
 * If being used directly, you must have already checked that we're allowed to
 * COW the range by getting true from can_cow_file_range_inline().
 */
static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
					    u64 size, size_t compressed_size,
					    int compress_type,
					    struct folio *compressed_folio,
					    bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_folio, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space: an inline extent doesn't
	 * count as a data extent, so free the reservation directly here. At
	 * reserve time the amount is always aligned to the page size, so just
	 * free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}

static noinline int cow_file_range_inline(struct btrfs_inode *inode,
					  struct folio *locked_folio,
					  u64 offset, u64 end,
					  size_t compressed_size,
					  int compress_type,
					  struct folio *compressed_folio,
					  bool update_i_size)
{
	struct extent_state *cached = NULL;
	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
	int ret;

	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
		return 1;

	lock_extent(&inode->io_tree, offset, end, &cached);
	ret = __cow_file_range_inline(inode, size, compressed_size,
				      compress_type, compressed_folio,
				      update_i_size);
	if (ret > 0) {
		unlock_extent(&inode->io_tree, offset, end, &cached);
		return ret;
	}

	/*
	 * In the successful case (ret == 0 here), cow_file_range() will return 1.
	 *
	 * Quite a bit further up the call stack in extent_writepage(), ret == 1
	 * is treated as a short-circuited success and does not unlock the folio,
	 * so we must do it here.
	 *
	 * In the failure case, the locked_folio does get unlocked by
	 * btrfs_folio_end_all_writers, which asserts that it is still locked
	 * at that point, so we must *not* unlock it here.
	 *
	 * The other two call sites in compress_file_range() do not have a
	 * locked_folio, so they are not relevant to this logic.
	 */
	if (ret == 0)
		locked_folio = NULL;

	extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
				     clear_flags, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
	return ret;
}

struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct folio **folios;
	unsigned long nr_folios;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct folio *locked_folio;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};
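
/*
 * Illustrative note (editor's sketch, not from the original source): an
 * async_cow is one allocation holding an async_chunk per 512K slice of the
 * delalloc range, sized via the flexible-array helper, e.g.:
 *
 *	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
 *
 * num_chunks doubles as a refcount: each chunk's free phase decrements it,
 * and the last chunk to finish kvfree()s the whole async_cow (see
 * run_delalloc_compressed() and submit_compressed_extents() below).
 */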

static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct folio **folios,
				     unsigned long nr_folios,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	if (!async_extent)
		return -ENOMEM;
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->folios = folios;
	async_extent->nr_folios = nr_folios;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}

/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Only enable sector perfect compression for experimental builds.
	 *
	 * This is a big feature change for subpage cases, and can hit
	 * different corner cases, so limit this feature to experimental
	 * builds for now.
	 *
	 * ETA for moving this out of experimental builds is 6.15.
	 */
	if (fs_info->sectorsize < PAGE_SIZE &&
	    !IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(inode, start, end);
	return 0;
}
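
/*
 * Illustrative summary (editor's note, not from the original source) of the
 * precedence above: compress-force and the defrag ioctl always win; the
 * per-inode NOCOMPRESS flag (set after bad ratios) then vetoes compression;
 * otherwise the mount option, inode flag or compression property only make
 * the range a candidate, and btrfs_compress_heuristic() has the final say.
 */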

static inline void inode_should_defrag(struct btrfs_inode *inode,
				       u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(inode, small_write);
}

static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long end_index = end >> PAGE_SHIFT;
	struct folio *folio;
	int ret = 0;

	for (unsigned long index = start >> PAGE_SHIFT;
	     index <= end_index; index++) {
		folio = filemap_get_folio(inode->i_mapping, index);
		if (IS_ERR(folio)) {
			if (!ret)
				ret = PTR_ERR(folio);
			continue;
		}
		btrfs_folio_clamp_clear_dirty(inode_to_fs_info(inode), folio, start,
					      end + 1 - start);
		folio_put(folio);
	}
	return ret;
}

/*
 * Work queue callback that starts compression on a file's pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus. The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes. This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct folio **folios;
	unsigned long nr_folios;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int poff;
	int i;
	int compress_type = fs_info->compress_type;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);

	/*
	 * All the folios should have been locked, thus no failure is expected.
	 *
	 * And even if some folios are missing, btrfs_compress_folios()
	 * would handle them correctly, so here just do an ASSERT() check for
	 * early logic errors.
	 */
	ASSERT(ret == 0);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it. This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * does that for us.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);
again:
	folios = NULL;
	nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * We don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time. So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	total_compressed = min_t(unsigned long, total_compressed,
				 BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS. This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
	if (!folios) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to the uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (inode->defrag_compress)
		compress_type = inode->defrag_compress;
	else if (inode->prop_compress)
		compress_type = inode->prop_compress;

	/* Compression level is applied here. */
	ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4),
				    mapping, start, folios, &nr_folios, &total_in,
				    &total_compressed);
	if (ret)
		goto mark_incompressible;

	/*
	 * Zero the tail end of the last page, as we might be sending it down
	 * to disk.
	 */
	poff = offset_in_page(total_compressed);
	if (poff)
		folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create an inline
	 * extent for the subpage case.
	 */
	if (total_in < actual_end)
		ret = cow_file_range_inline(inode, NULL, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
	else
		ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
					    compress_type, folios[0], false);
	if (ret <= 0) {
		if (ret < 0)
			mapping_set_error(mapping, -EIO);
		goto free_pages;
	}

	/*
	 * We aren't doing an inline extent. Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
			       nr_folios, compress_type);
	BUG_ON(ret);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			       BTRFS_COMPRESS_NONE);
	BUG_ON(ret);
free_pages:
	if (folios) {
		for (i = 0; i < nr_folios; i++) {
			WARN_ON(folios[i]->mapping);
			btrfs_free_compr_folio(folios[i]);
		}
		kfree(folios);
	}
}

static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->folios)
		return;

	for (i = 0; i < async_extent->nr_folios; i++) {
		WARN_ON(async_extent->folios[i]->mapping);
		btrfs_free_compr_folio(async_extent->folios[i]);
	}
	kfree(async_extent->folios);
	async_extent->nr_folios = 0;
	async_extent->folios = NULL;
}

static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct folio *locked_folio)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_folio, start, end,
			       &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
		if (locked_folio)
			btrfs_folio_end_lock(inode->root->fs_info, locked_folio,
					     start, async_extent->ram_size);
		btrfs_err_rl(inode->root->fs_info,
			"%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
			     __func__, btrfs_root_id(inode->root),
			     btrfs_ino(inode), start, async_extent->ram_size, ret);
	}
}

static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	struct btrfs_key ins;
	struct folio *locked_folio = NULL;
	struct extent_state *cached = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_folio is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_folio) {
		u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
		u64 locked_folio_end = locked_folio_start +
			folio_size(async_chunk->locked_folio) - 1;

		if (!(start >= locked_folio_end || end <= locked_folio_start))
			locked_folio = async_chunk->locked_folio;
	}

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		submit_uncompressed_range(inode, async_extent, locked_folio);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		/*
		 * We can't reserve contiguous space for the compressed size.
		 * Unlikely, but it's possible that we could have enough
		 * non-contiguous space for the uncompressed size instead. So
		 * fall back to uncompressed.
		 */
		submit_uncompressed_range(inode, async_extent, locked_folio);
		goto done;
	}

	lock_extent(io_tree, start, end, &cached);

	/* Here we're doing allocation and writeback of the compressed pages */
	file_extent.disk_bytenr = ins.objectid;
	file_extent.disk_num_bytes = ins.offset;
	file_extent.ram_bytes = async_extent->ram_size;
	file_extent.num_bytes = async_extent->ram_size;
	file_extent.offset = 0;
	file_extent.compression = async_extent->compress_type;

	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
					     1 << BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->folios,	/* compressed_folios */
			    async_extent->nr_folios,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    btrfs_root_id(root), btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}

u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				     u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * If block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint. If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
				alloc_hint = extent_map_block_start(em);
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = extent_map_block_start(em);
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}

/*
 * When extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code. The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in RAM to track those extents.
 *
 * locked_folio is the folio that writepage had locked already. We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all pages except @locked_folio.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all pages including locked_folio and starts I/O on them.
 * (In reality inline extents are limited to a single page, so locked_folio is
 * the only page handled anyway).
 *
 * When this function succeeds and creates a normal extent, the page locking
 * status depends on the passed in flags:
 *
 * - If @keep_locked is set, all pages are kept locked.
 * - Else all pages except for @locked_folio are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact. So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents(). See btrfs_run_delalloc_range() for an
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct folio *locked_folio, u64 start,
				   u64 end, u64 *done_offset,
				   bool keep_locked, bool no_inline)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_state *cached = NULL;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	if (!no_inline) {
		/* Let's try to make an inline extent. */
		ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret <= 0) {
			/*
			 * We succeeded, return 1 so the caller knows we're done
			 * with this page and already handled the IO.
			 *
			 * If there was an error then cow_file_range_inline() has
			 * already done the cleanup.
			 */
			if (ret == 0)
				ret = 1;
			goto done;
		}
	}

	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * We're not doing compressed IO, don't unlock the first page (which
	 * the caller expects to stay locked), don't clear any dirty bits and
	 * don't set any writeback bits.
	 *
	 * Do set the Ordered (Private2) bit so we know this page was properly
	 * set up for writepage.
	 */
	page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
	page_ops |= PAGE_SET_ORDERED;

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents. Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents. However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fall back to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;
		struct btrfs_file_extent file_extent;

		ret = btrfs_reserve_extent(root, num_bytes, num_bytes,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation. Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				/*
				 * Move @end to the end of the processed range,
				 * and exit the loop to unlock the processed extents.
				 */
				end = start - 1;
				ret = 0;
				break;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;

		file_extent.disk_bytenr = ins.objectid;
		file_extent.disk_num_bytes = ins.offset;
		file_extent.num_bytes = ins.offset;
		file_extent.ram_bytes = ins.offset;
		file_extent.offset = 0;
		file_extent.compression = BTRFS_COMPRESS_NONE;

		/*
		 * The locked range will be released either during error clean
		 * up or after the whole range is finished.
		 */
		lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
			    &cached);

		em = btrfs_create_io_em(inode, start, &file_extent,
					BTRFS_ORDERED_REGULAR);
		if (IS_ERR(em)) {
			unlock_extent(&inode->io_tree, start,
				      start + cur_alloc_size - 1, &cached);
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
						     1 << BTRFS_ORDERED_REGULAR);
		if (IS_ERR(ordered)) {
			unlock_extent(&inode->io_tree, start,
				      start + cur_alloc_size - 1, &cached);
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);

			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at the out_unlock label to free the meta of this
			 * ordered extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip the current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + cur_alloc_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		cur_alloc_size = 0;

		/*
		 * On a btrfs_reloc_clone_csums() error, since start has been
		 * increased, extent_clear_unlock_delalloc() at the out_unlock
		 * label won't free the metadata of the current ordered extent,
		 * so we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
	extent_clear_unlock_delalloc(inode, orig_start, end, locked_folio, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC, page_ops);
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	/*
	 * For the range (1). We have already instantiated the ordered extents
	 * for this region. They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in, e.g.,
	 * btrfs_run_delalloc_range().
	 * EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV
	 * are also handled by the cleanup function.
	 *
	 * So here we only clear the EXTENT_LOCKED and EXTENT_DELALLOC flags,
	 * and finish the writeback of the involved folios, which will never be
	 * submitted.
	 */
	if (orig_start < start) {
		clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
		page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

		if (!locked_folio)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_folio, NULL, clear_bits, page_ops);
	}

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		     EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (2). If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount. We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (cur_alloc_size) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_folio, &cached, clear_bits,
					     page_ops);
		btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
	}

	/*
	 * For the range (3). We never touched the region. In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start + cur_alloc_size < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start + cur_alloc_size,
					     end, locked_folio,
					     &cached, clear_bits, page_ops);
		btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
				       end - start - cur_alloc_size + 1, NULL);
	}
	btrfs_err_rl(fs_info,
		     "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
		     __func__, btrfs_root_id(inode->root),
		     btrfs_ino(inode), orig_start, end + 1 - orig_start, ret);
	return ret;
}

/*
 * Phase two of compressed writeback. This is the ordered portion of the code,
 * which only gets called in the order the work was queued. We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 *
 * If called with @do_free == true then it'll try to finish the work and free
 * the work struct eventually.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	if (do_free) {
		struct async_cow *async_cow;

		btrfs_add_delayed_iput(async_chunk->inode);
		if (async_chunk->blkcg_css)
			css_put(async_chunk->blkcg_css);

		async_cow = async_chunk->async_cow;
		if (atomic_dec_and_test(&async_cow->num_chunks))
			kvfree(async_cow);
		return;
	}

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}
1602
run_delalloc_compressed(struct btrfs_inode * inode,struct folio * locked_folio,u64 start,u64 end,struct writeback_control * wbc)1603 static bool run_delalloc_compressed(struct btrfs_inode *inode,
1604 struct folio *locked_folio, u64 start,
1605 u64 end, struct writeback_control *wbc)
1606 {
1607 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1608 struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
1609 struct async_cow *ctx;
1610 struct async_chunk *async_chunk;
1611 unsigned long nr_pages;
1612 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1613 int i;
1614 unsigned nofs_flag;
1615 const blk_opf_t write_flags = wbc_to_write_flags(wbc);
1616
1617 nofs_flag = memalloc_nofs_save();
1618 ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
1619 memalloc_nofs_restore(nofs_flag);
1620 if (!ctx)
1621 return false;
1622
1623 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
1624
1625 async_chunk = ctx->chunks;
1626 atomic_set(&ctx->num_chunks, num_chunks);
1627
1628 for (i = 0; i < num_chunks; i++) {
1629 u64 cur_end = min(end, start + SZ_512K - 1);
1630
1631 /*
1632 * igrab is called higher up in the call chain, take only the
1633 * lightweight reference for the callback lifetime
1634 */
1635 ihold(&inode->vfs_inode);
1636 async_chunk[i].async_cow = ctx;
1637 async_chunk[i].inode = inode;
1638 async_chunk[i].start = start;
1639 async_chunk[i].end = cur_end;
1640 async_chunk[i].write_flags = write_flags;
1641 INIT_LIST_HEAD(&async_chunk[i].extents);
1642
1643 /*
1644 * The locked_folio comes all the way from writepage and its
1645 * the original folio we were actually given. As we spread
1646 * this large delalloc region across multiple async_chunk
1647 * structs, only the first struct needs a pointer to
1648 * locked_folio.
1649 *
1650 * This way we don't need racy decisions about who is supposed
1651 * to unlock it.
1652 */
1653 if (locked_folio) {
1654 /*
1655 * Depending on the compressibility, the pages might or
1656 * might not go through async. We want all of them to
1657 * be accounted against wbc once. Let's do it here
1658 * before the paths diverge. wbc accounting is used
1659 * only for foreign writeback detection and doesn't
1660 * need full accuracy. Just account the whole thing
1661 * against the first page.
1662 */
1663 wbc_account_cgroup_owner(wbc, locked_folio,
1664 cur_end - start);
1665 async_chunk[i].locked_folio = locked_folio;
1666 locked_folio = NULL;
1667 } else {
1668 async_chunk[i].locked_folio = NULL;
1669 }
1670
1671 if (blkcg_css != blkcg_root_css) {
1672 css_get(blkcg_css);
1673 async_chunk[i].blkcg_css = blkcg_css;
1674 async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
1675 } else {
1676 async_chunk[i].blkcg_css = NULL;
1677 }
1678
1679 btrfs_init_work(&async_chunk[i].work, compress_file_range,
1680 submit_compressed_extents);
1681
1682 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
1683 atomic_add(nr_pages, &fs_info->async_delalloc_pages);
1684
1685 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
1686
1687 start = cur_end + 1;
1688 }
1689 return true;
1690 }
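/*
 * Illustrative sketch (not part of the code flow): how the chunking above
 * carves up a delalloc range. For a range of 1 MiB + 256 KiB, i.e.
 * end - start == 1310719 (the end offset is inclusive), we get:
 *
 *	num_chunks = DIV_ROUND_UP(1310719, SZ_512K) = 3
 *
 * and the loop queues the chunks [start, start + 512K - 1],
 * [start + 512K, start + 1M - 1] and [start + 1M, end], each compressed and
 * submitted independently by compress_file_range() and
 * submit_compressed_extents().
 */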
1691
1692 /*
1693 * Run the delalloc range from start to end, and write back any dirty pages
1694 * covered by the range.
1695 */
1696 static noinline int run_delalloc_cow(struct btrfs_inode *inode,
1697 struct folio *locked_folio, u64 start,
1698 u64 end, struct writeback_control *wbc,
1699 bool pages_dirty)
1700 {
1701 u64 done_offset = end;
1702 int ret;
1703
1704 while (start <= end) {
1705 ret = cow_file_range(inode, locked_folio, start, end,
1706 &done_offset, true, false);
1707 if (ret)
1708 return ret;
1709 extent_write_locked_range(&inode->vfs_inode, locked_folio,
1710 start, done_offset, wbc, pages_dirty);
1711 start = done_offset + 1;
1712 }
1713
1714 return 1;
1715 }
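/*
 * A worked example of the loop above, assuming cow_file_range() could only
 * allocate part of the range (e.g. on a zoned filesystem with limited space
 * in the current zone): for start == 0 and end == 1M - 1, a first pass may
 * set done_offset to 256K - 1. The range [0, 256K - 1] is then written out
 * via extent_write_locked_range() and the loop resumes at start == 256K,
 * until the whole range has been allocated and submitted.
 */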
1716
1717 static int fallback_to_cow(struct btrfs_inode *inode,
1718 struct folio *locked_folio, const u64 start,
1719 const u64 end)
1720 {
1721 const bool is_space_ino = btrfs_is_free_space_inode(inode);
1722 const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
1723 const u64 range_bytes = end + 1 - start;
1724 struct extent_io_tree *io_tree = &inode->io_tree;
1725 struct extent_state *cached_state = NULL;
1726 u64 range_start = start;
1727 u64 count;
1728 int ret;
1729
1730 /*
1731 * If EXTENT_NORESERVE is set it means that when the buffered write was
1732 * made we did not have enough available data space and therefore we did not
1733 * reserve data space for it, since we thought we could do NOCOW for the
1734 * respective file range (either there is a prealloc extent or the inode
1735 * has the NOCOW bit set).
1736 *
1737 * However when we need to fallback to COW mode (because for example the
1738 * block group for the corresponding extent was turned to RO mode by a
1739 * scrub or relocation) we need to do the following:
1740 *
1741 * 1) We increment the bytes_may_use counter of the data space info.
1742 * If COW succeeds, it allocates a new data extent and after doing
1743 * that it decrements the space info's bytes_may_use counter and
1744 * increments its bytes_reserved counter by the same amount (we do
1745 * this at btrfs_add_reserved_bytes()). So we need to increment the
1746 * bytes_may_use counter to compensate (when space is reserved at
1747 * buffered write time, the bytes_may_use counter is incremented);
1748 *
1749 * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
1750 * that if the COW path fails for any reason, it decrements (through
1751 * extent_clear_unlock_delalloc()) the bytes_may_use counter of the
1752 * data space info, which we incremented in the step above.
1753 *
1754 * If we need to fallback to cow and the inode corresponds to a free
1755 * space cache inode or an inode of the data relocation tree, we must
1756 * also increment bytes_may_use of the data space_info for the same
1757 * reason. Space caches and relocated data extents always get a prealloc
1758 * extent for them, however scrub or balance may have set the block
1759 * group that contains that extent to RO mode and therefore force COW
1760 * when starting writeback.
1761 */
1762 lock_extent(io_tree, start, end, &cached_state);
1763 count = count_range_bits(io_tree, &range_start, end, range_bytes,
1764 EXTENT_NORESERVE, 0, NULL);
1765 if (count > 0 || is_space_ino || is_reloc_ino) {
1766 u64 bytes = count;
1767 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1768 struct btrfs_space_info *sinfo = fs_info->data_sinfo;
1769
1770 if (is_space_ino || is_reloc_ino)
1771 bytes = range_bytes;
1772
1773 spin_lock(&sinfo->lock);
1774 btrfs_space_info_update_bytes_may_use(sinfo, bytes);
1775 spin_unlock(&sinfo->lock);
1776
1777 if (count > 0)
1778 clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1779 NULL);
1780 }
1781 unlock_extent(io_tree, start, end, &cached_state);
1782
1783 /*
1784 * Don't try to create inline extents, as a mix of an inline extent
1785 * that is written out and unlocked directly and a normal NOCOW extent
1786 * doesn't work.
1787 */
1788 ret = cow_file_range(inode, locked_folio, start, end, NULL, false,
1789 true);
1790 ASSERT(ret != 1);
1791 return ret;
1792 }
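/*
 * Worked example for the bytes_may_use compensation above (a sketch with
 * made-up numbers): suppose a 64K buffered write went the NOCOW path with no
 * data space reserved, so the range carries EXTENT_NORESERVE and
 * bytes_may_use was never incremented for it. If scrub then turns the target
 * block group read-only, we land here with count == 64K and do:
 *
 *	btrfs_space_info_update_bytes_may_use(sinfo, 64K);
 *
 * so that the later btrfs_add_reserved_bytes() during the COW allocation can
 * decrement bytes_may_use by the same 64K without underflowing the counter.
 */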
1793
1794 struct can_nocow_file_extent_args {
1795 /* Input fields. */
1796
1797 /* Start file offset of the range we want to NOCOW. */
1798 u64 start;
1799 /* End file offset (inclusive) of the range we want to NOCOW. */
1800 u64 end;
1801 bool writeback_path;
1802 /*
1803 * Free the path passed to can_nocow_file_extent() once it's not needed
1804 * anymore.
1805 */
1806 bool free_path;
1807
1808 /*
1809 * Output fields. Only set when can_nocow_file_extent() returns 1.
1810 * The expected file extent for the NOCOW write.
1811 */
1812 struct btrfs_file_extent file_extent;
1813 };
1814
1815 /*
1816 * Check if we can NOCOW the file extent that the path points to.
1817 * This function may return with the path released, so the caller should check
1818 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1819 *
1820 * Returns: < 0 on error
1821 * 0 if we can not NOCOW
1822 * 1 if we can NOCOW
1823 */
1824 static int can_nocow_file_extent(struct btrfs_path *path,
1825 struct btrfs_key *key,
1826 struct btrfs_inode *inode,
1827 struct can_nocow_file_extent_args *args)
1828 {
1829 const bool is_freespace_inode = btrfs_is_free_space_inode(inode);
1830 struct extent_buffer *leaf = path->nodes[0];
1831 struct btrfs_root *root = inode->root;
1832 struct btrfs_file_extent_item *fi;
1833 struct btrfs_root *csum_root;
1834 u64 io_start;
1835 u64 extent_end;
1836 u8 extent_type;
1837 int can_nocow = 0;
1838 int ret = 0;
1839 bool nowait = path->nowait;
1840
1841 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
1842 extent_type = btrfs_file_extent_type(leaf, fi);
1843
1844 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1845 goto out;
1846
1847 if (!(inode->flags & BTRFS_INODE_NODATACOW) &&
1848 extent_type == BTRFS_FILE_EXTENT_REG)
1849 goto out;
1850
1851 /*
1852 * If the extent was created before the generation where the last snapshot
1853 * for its subvolume was created, then this implies the extent is shared,
1854 * hence we must COW.
1855 */
1856 if (btrfs_file_extent_generation(leaf, fi) <=
1857 btrfs_root_last_snapshot(&root->root_item))
1858 goto out;
1859
1860 /* An explicit hole, must COW. */
1861 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
1862 goto out;
1863
1864 /* Compressed/encrypted/encoded extents must be COWed. */
1865 if (btrfs_file_extent_compression(leaf, fi) ||
1866 btrfs_file_extent_encryption(leaf, fi) ||
1867 btrfs_file_extent_other_encoding(leaf, fi))
1868 goto out;
1869
1870 extent_end = btrfs_file_extent_end(path);
1871
1872 args->file_extent.disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1873 args->file_extent.disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1874 args->file_extent.ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1875 args->file_extent.offset = btrfs_file_extent_offset(leaf, fi);
1876 args->file_extent.compression = btrfs_file_extent_compression(leaf, fi);
1877
1878 /*
1879 * The following checks can be expensive, as they need to take other
1880 * locks and do btree or rbtree searches, so release the path to avoid
1881 * blocking other tasks for too long.
1882 */
1883 btrfs_release_path(path);
1884
1885 ret = btrfs_cross_ref_exist(inode, key->offset - args->file_extent.offset,
1886 args->file_extent.disk_bytenr, path);
1887 WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1888 if (ret != 0)
1889 goto out;
1890
1891 if (args->free_path) {
1892 /*
1893 * We don't need the path anymore, plus through the
1894 * btrfs_lookup_csums_list() call below we will end up allocating
1895 * another path. So free the path to avoid unnecessary extra
1896 * memory usage.
1897 */
1898 btrfs_free_path(path);
1899 path = NULL;
1900 }
1901
1902 /* If there are pending snapshots for this root, we must COW. */
1903 if (args->writeback_path && !is_freespace_inode &&
1904 atomic_read(&root->snapshot_force_cow))
1905 goto out;
1906
1907 args->file_extent.num_bytes = min(args->end + 1, extent_end) - args->start;
1908 args->file_extent.offset += args->start - key->offset;
1909 io_start = args->file_extent.disk_bytenr + args->file_extent.offset;
1910
1911 /*
1912 * Force COW if csums exist in the range. This ensures that csums for a
1913 * given extent are either valid or do not exist.
1914 */
1915
1916 csum_root = btrfs_csum_root(root->fs_info, io_start);
1917 ret = btrfs_lookup_csums_list(csum_root, io_start,
1918 io_start + args->file_extent.num_bytes - 1,
1919 NULL, nowait);
1920 WARN_ON_ONCE(ret > 0 && is_freespace_inode);
1921 if (ret != 0)
1922 goto out;
1923
1924 can_nocow = 1;
1925 out:
1926 if (args->free_path && path)
1927 btrfs_free_path(path);
1928
1929 return ret < 0 ? ret : can_nocow;
1930 }
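/*
 * Hedged usage sketch, mirroring the caller run_delalloc_nocow() below (the
 * variable names are illustrative only):
 *
 *	struct can_nocow_file_extent_args nocow_args = { 0 };
 *
 *	nocow_args.start = cur_offset;
 *	nocow_args.end = end;
 *	nocow_args.writeback_path = true;
 *	ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
 *	if (ret < 0)		error
 *	else if (ret == 0)	must COW this range
 *	else			NOCOW is OK, nocow_args.file_extent is filled
 */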
1931
1932 /*
1933 * Cleanup the dirty folios which will never be submitted due to error.
1934 *
1935 * When running a delalloc range, we may need to split the ranges (due to
1936 * fragmentation or NOCOW). If we hit an error in a later part, the previously
1937 * successfully executed ranges will never be submitted, thus we have to clean
1938 * up those folios by clearing their dirty flag, and starting and finishing
1939 * the writeback.
1940 */
1941 static void cleanup_dirty_folios(struct btrfs_inode *inode,
1942 struct folio *locked_folio,
1943 u64 start, u64 end, int error)
1944 {
1945 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1946 struct address_space *mapping = inode->vfs_inode.i_mapping;
1947 pgoff_t start_index = start >> PAGE_SHIFT;
1948 pgoff_t end_index = end >> PAGE_SHIFT;
1949 u32 len;
1950
1951 ASSERT(end + 1 - start < U32_MAX);
1952 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
1953 IS_ALIGNED(end + 1, fs_info->sectorsize));
1954 len = end + 1 - start;
1955
1956 /*
1957 * Handle the locked folio first.
1958 * The btrfs_folio_clamp_*() helpers can handle range out of the folio case.
1959 */
1960 btrfs_folio_clamp_finish_io(fs_info, locked_folio, start, len);
1961
1962 for (pgoff_t index = start_index; index <= end_index; index++) {
1963 struct folio *folio;
1964
1965 /* Already handled at the beginning. */
1966 if (index == locked_folio->index)
1967 continue;
1968 folio = __filemap_get_folio(mapping, index, FGP_LOCK, GFP_NOFS);
1969 /* Cache already dropped, no need to do any cleanup. */
1970 if (IS_ERR(folio))
1971 continue;
1972 btrfs_folio_clamp_finish_io(fs_info, folio, start, len);
1973 folio_unlock(folio);
1974 folio_put(folio);
1975 }
1976 mapping_set_error(mapping, error);
1977 }
1978
1979 /*
1980 * NOCOW writeback callback. This checks for snapshots or COW copies of the
1981 * extents that exist in the file, and COWs the file as required.
1982 *
1983 * If no COW copies or snapshots exist, we write directly to the existing
1984 * blocks on disk.
1985 */
1986 static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
1987 struct folio *locked_folio,
1988 const u64 start, const u64 end)
1989 {
1990 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1991 struct btrfs_root *root = inode->root;
1992 struct btrfs_path *path;
1993 u64 cow_start = (u64)-1;
1994 /*
1995 * If not 0, represents the inclusive end of the last fallback_to_cow()
1996 * range. Only for error handling.
1997 */
1998 u64 cow_end = 0;
1999 u64 cur_offset = start;
2000 int ret;
2001 bool check_prev = true;
2002 u64 ino = btrfs_ino(inode);
2003 struct can_nocow_file_extent_args nocow_args = { 0 };
2004
2005 /*
2006 * Normally on a zoned device we're only doing COW writes, but in the case
2007 * of relocation a zoned filesystem serializes I/O so that we're only
2008 * writing sequentially, and we can end up here as well.
2009 */
2010 ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
2011
2012 path = btrfs_alloc_path();
2013 if (!path) {
2014 ret = -ENOMEM;
2015 goto error;
2016 }
2017
2018 nocow_args.end = end;
2019 nocow_args.writeback_path = true;
2020
2021 while (cur_offset <= end) {
2022 struct btrfs_block_group *nocow_bg = NULL;
2023 struct btrfs_ordered_extent *ordered;
2024 struct btrfs_key found_key;
2025 struct btrfs_file_extent_item *fi;
2026 struct extent_buffer *leaf;
2027 struct extent_state *cached_state = NULL;
2028 u64 extent_end;
2029 u64 nocow_end;
2030 int extent_type;
2031 bool is_prealloc;
2032
2033 ret = btrfs_lookup_file_extent(NULL, root, path, ino,
2034 cur_offset, 0);
2035 if (ret < 0)
2036 goto error;
2037
2038 /*
2039 * If there is no extent for our range when doing the initial
2040 * search, then go back to the previous slot as it will be the
2041 * one containing the search offset
2042 */
2043 if (ret > 0 && path->slots[0] > 0 && check_prev) {
2044 leaf = path->nodes[0];
2045 btrfs_item_key_to_cpu(leaf, &found_key,
2046 path->slots[0] - 1);
2047 if (found_key.objectid == ino &&
2048 found_key.type == BTRFS_EXTENT_DATA_KEY)
2049 path->slots[0]--;
2050 }
2051 check_prev = false;
2052 next_slot:
2053 /* Go to next leaf if we have exhausted the current one */
2054 leaf = path->nodes[0];
2055 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2056 ret = btrfs_next_leaf(root, path);
2057 if (ret < 0)
2058 goto error;
2059 if (ret > 0)
2060 break;
2061 leaf = path->nodes[0];
2062 }
2063
2064 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2065
2066 /* Didn't find anything for our INO */
2067 if (found_key.objectid > ino)
2068 break;
2069 /*
2070 * Keep searching until we find an EXTENT_DATA item or there are no
2071 * more extents for this inode
2072 */
2073 if (WARN_ON_ONCE(found_key.objectid < ino) ||
2074 found_key.type < BTRFS_EXTENT_DATA_KEY) {
2075 path->slots[0]++;
2076 goto next_slot;
2077 }
2078
2079 /* Found key is not EXTENT_DATA_KEY or starts after req range */
2080 if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
2081 found_key.offset > end)
2082 break;
2083
2084 /*
2085 * If the found extent starts after requested offset, then
2086 * adjust extent_end to be right before this extent begins
2087 */
2088 if (found_key.offset > cur_offset) {
2089 extent_end = found_key.offset;
2090 extent_type = 0;
2091 goto must_cow;
2092 }
2093
2094 /*
2095 * Found an extent which begins before our range and potentially
2096 * intersects it
2097 */
2098 fi = btrfs_item_ptr(leaf, path->slots[0],
2099 struct btrfs_file_extent_item);
2100 extent_type = btrfs_file_extent_type(leaf, fi);
2101 /* If this is triggered then we have a memory corruption. */
2102 ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
2103 if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
2104 ret = -EUCLEAN;
2105 goto error;
2106 }
2107 extent_end = btrfs_file_extent_end(path);
2108
2109 /*
2110 * If the extent we got ends before our current offset, skip to
2111 * the next extent.
2112 */
2113 if (extent_end <= cur_offset) {
2114 path->slots[0]++;
2115 goto next_slot;
2116 }
2117
2118 nocow_args.start = cur_offset;
2119 ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
2120 if (ret < 0)
2121 goto error;
2122 if (ret == 0)
2123 goto must_cow;
2124
2125 ret = 0;
2126 nocow_bg = btrfs_inc_nocow_writers(fs_info,
2127 nocow_args.file_extent.disk_bytenr +
2128 nocow_args.file_extent.offset);
2129 if (!nocow_bg) {
2130 must_cow:
2131 /*
2132 * If we can't perform NOCOW writeback for the range,
2133 * then record the beginning of the range that needs to
2134 * be COWed. It will be written out before the next
2135 * NOCOW range if we find one, or when exiting this
2136 * loop.
2137 */
2138 if (cow_start == (u64)-1)
2139 cow_start = cur_offset;
2140 cur_offset = extent_end;
2141 if (cur_offset > end)
2142 break;
2143 if (!path->nodes[0])
2144 continue;
2145 path->slots[0]++;
2146 goto next_slot;
2147 }
2148
2149 /*
2150 * COW the range from cow_start to found_key.offset - 1, as the key
2151 * contains the beginning of the first extent that can be NOCOWed,
2152 * following one which needs to be COWed.
2153 */
2154 if (cow_start != (u64)-1) {
2155 ret = fallback_to_cow(inode, locked_folio, cow_start,
2156 found_key.offset - 1);
2157 cow_start = (u64)-1;
2158 if (ret) {
2159 cow_end = found_key.offset - 1;
2160 btrfs_dec_nocow_writers(nocow_bg);
2161 goto error;
2162 }
2163 }
2164
2165 nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
2166 lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state);
2167
2168 is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
2169 if (is_prealloc) {
2170 struct extent_map *em;
2171
2172 em = btrfs_create_io_em(inode, cur_offset,
2173 &nocow_args.file_extent,
2174 BTRFS_ORDERED_PREALLOC);
2175 if (IS_ERR(em)) {
2176 unlock_extent(&inode->io_tree, cur_offset,
2177 nocow_end, &cached_state);
2178 btrfs_dec_nocow_writers(nocow_bg);
2179 ret = PTR_ERR(em);
2180 goto error;
2181 }
2182 free_extent_map(em);
2183 }
2184
2185 ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
2186 &nocow_args.file_extent,
2187 is_prealloc
2188 ? (1 << BTRFS_ORDERED_PREALLOC)
2189 : (1 << BTRFS_ORDERED_NOCOW));
2190 btrfs_dec_nocow_writers(nocow_bg);
2191 if (IS_ERR(ordered)) {
2192 if (is_prealloc) {
2193 btrfs_drop_extent_map_range(inode, cur_offset,
2194 nocow_end, false);
2195 }
2196 unlock_extent(&inode->io_tree, cur_offset,
2197 nocow_end, &cached_state);
2198 ret = PTR_ERR(ordered);
2199 goto error;
2200 }
2201
2202 if (btrfs_is_data_reloc_root(root))
2203 /*
2204 * The error is handled later, as we must prevent
2205 * extent_clear_unlock_delalloc() in the error handler
2206 * from freeing the metadata of the created ordered extent.
2207 */
2208 ret = btrfs_reloc_clone_csums(ordered);
2209 btrfs_put_ordered_extent(ordered);
2210
2211 extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
2212 locked_folio, &cached_state,
2213 EXTENT_LOCKED | EXTENT_DELALLOC |
2214 EXTENT_CLEAR_DATA_RESV,
2215 PAGE_UNLOCK | PAGE_SET_ORDERED);
2216
2217 cur_offset = extent_end;
2218
2219 /*
2220 * On a btrfs_reloc_clone_csums() error it's now OK to call the
2221 * error handler, as the metadata for the created ordered extent
2222 * will only be freed by btrfs_finish_ordered_io().
2223 */
2224 if (ret)
2225 goto error;
2226 }
2227 btrfs_release_path(path);
2228
2229 if (cur_offset <= end && cow_start == (u64)-1)
2230 cow_start = cur_offset;
2231
2232 if (cow_start != (u64)-1) {
2233 ret = fallback_to_cow(inode, locked_folio, cow_start, end);
2234 cow_start = (u64)-1;
2235 if (ret) {
2236 cow_end = end;
2237 goto error;
2238 }
2239 }
2240
2241 btrfs_free_path(path);
2242 return 0;
2243
2244 error:
2245 /*
2246 * There are several error cases:
2247 *
2248 * 1) Failed without falling back to COW
2249 * start cur_offset end
2250 * |/////////////| |
2251 *
2252 * For range [start, cur_offset) the folios are already unlocked (except
2253 * @locked_folio), EXTENT_DELALLOC already removed.
2254 * Only need to clear the dirty flag as they will never be submitted.
2255 * Ordered extent and extent maps are handled by
2256 * btrfs_mark_ordered_io_finished() inside run_delalloc_range().
2257 *
2258 * 2) Failed with error from fallback_to_cow()
2259 * start cur_offset cow_end end
2260 * |/////////////|-----------| |
2261 *
2262 * For range [start, cur_offset) it's the same as case 1).
2263 * But for range [cur_offset, cow_end), the folios have their dirty flag
2264 * cleared and are unlocked, with EXTENT_DELALLOC cleared by cow_file_range().
2265 *
2266 * Thus we should not call extent_clear_unlock_delalloc() on range
2267 * [cur_offset, cow_end), as the folios are already unlocked.
2268 *
2269 * So clear the folio dirty flags for [start, cur_offset) first.
2270 */
2271 if (cur_offset > start)
2272 cleanup_dirty_folios(inode, locked_folio, start, cur_offset - 1, ret);
2273
2274 /*
2275 * If an error happened while a COW region is outstanding, cur_offset
2276 * needs to be reset to @cow_end + 1 to skip the COW range, as
2277 * cow_file_range() will do the proper cleanup on error.
2278 */
2279 if (cow_end)
2280 cur_offset = cow_end + 1;
2281
2282 /*
2283 * We need to lock the extent here because we're clearing DELALLOC and
2284 * we're not locked at this point.
2285 */
2286 if (cur_offset < end) {
2287 struct extent_state *cached = NULL;
2288
2289 lock_extent(&inode->io_tree, cur_offset, end, &cached);
2290 extent_clear_unlock_delalloc(inode, cur_offset, end,
2291 locked_folio, &cached,
2292 EXTENT_LOCKED | EXTENT_DELALLOC |
2293 EXTENT_DEFRAG |
2294 EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
2295 PAGE_START_WRITEBACK |
2296 PAGE_END_WRITEBACK);
2297 btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL);
2298 }
2299 btrfs_free_path(path);
2300 btrfs_err_rl(fs_info,
2301 "%s failed, root=%llu inode=%llu start=%llu len=%llu: %d",
2302 __func__, btrfs_root_id(inode->root),
2303 btrfs_ino(inode), start, end + 1 - start, ret);
2304 return ret;
2305 }
2306
2307 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2308 {
2309 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
2310 if (inode->defrag_bytes &&
2311 test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
2312 return false;
2313 return true;
2314 }
2315 return false;
2316 }
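/*
 * Example: an inode with BTRFS_INODE_NODATACOW normally returns true here,
 * but if part of [start, end] was marked for defragmentation (EXTENT_DEFRAG
 * set while inode->defrag_bytes > 0) we return false, since defrag has to
 * rewrite the extents and thus must go through the COW path.
 */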
2317
2318 /*
2319 * Function to process delayed allocation (create CoW) for ranges which are
2320 * being touched for the first time.
2321 */
2322 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
2323 u64 start, u64 end, struct writeback_control *wbc)
2324 {
2325 const bool zoned = btrfs_is_zoned(inode->root->fs_info);
2326 int ret;
2327
2328 /*
2329 * The range must cover part of the @locked_folio, or a return of 1
2330 * can confuse the caller.
2331 */
2332 ASSERT(!(end <= folio_pos(locked_folio) ||
2333 start >= folio_pos(locked_folio) + folio_size(locked_folio)));
2334
2335 if (should_nocow(inode, start, end)) {
2336 ret = run_delalloc_nocow(inode, locked_folio, start, end);
2337 goto out;
2338 }
2339
2340 if (btrfs_inode_can_compress(inode) &&
2341 inode_need_compress(inode, start, end) &&
2342 run_delalloc_compressed(inode, locked_folio, start, end, wbc))
2343 return 1;
2344
2345 if (zoned)
2346 ret = run_delalloc_cow(inode, locked_folio, start, end, wbc,
2347 true);
2348 else
2349 ret = cow_file_range(inode, locked_folio, start, end, NULL,
2350 false, false);
2351
2352 out:
2353 if (ret < 0)
2354 btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
2355 return ret;
2356 }
2357
2358 void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
2359 struct extent_state *orig, u64 split)
2360 {
2361 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2362 u64 size;
2363
2364 lockdep_assert_held(&inode->io_tree.lock);
2365
2366 /* not delalloc, ignore it */
2367 if (!(orig->state & EXTENT_DELALLOC))
2368 return;
2369
2370 size = orig->end - orig->start + 1;
2371 if (size > fs_info->max_extent_size) {
2372 u32 num_extents;
2373 u64 new_size;
2374
2375 /*
2376 * See the explanation in btrfs_merge_delalloc_extent, the same
2377 * applies here, just in reverse.
2378 */
2379 new_size = orig->end - split + 1;
2380 num_extents = count_max_extents(fs_info, new_size);
2381 new_size = split - orig->start;
2382 num_extents += count_max_extents(fs_info, new_size);
2383 if (count_max_extents(fs_info, size) >= num_extents)
2384 return;
2385 }
2386
2387 spin_lock(&inode->lock);
2388 btrfs_mod_outstanding_extents(inode, 1);
2389 spin_unlock(&inode->lock);
2390 }
2391
2392 /*
2393 * Handle merged delayed allocation extents so we can keep track of new extents
2394 * that are just merged onto old extents, such as when we are doing sequential
2395 * writes, so we can properly account for the metadata space we'll need.
2396 */
2397 void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
2398 struct extent_state *other)
2399 {
2400 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2401 u64 new_size, old_size;
2402 u32 num_extents;
2403
2404 lockdep_assert_held(&inode->io_tree.lock);
2405
2406 /* not delalloc, ignore it */
2407 if (!(other->state & EXTENT_DELALLOC))
2408 return;
2409
2410 if (new->start > other->start)
2411 new_size = new->end - other->start + 1;
2412 else
2413 new_size = other->end - new->start + 1;
2414
2415 /* we're not bigger than the max, unreserve the space and go */
2416 if (new_size <= fs_info->max_extent_size) {
2417 spin_lock(&inode->lock);
2418 btrfs_mod_outstanding_extents(inode, -1);
2419 spin_unlock(&inode->lock);
2420 return;
2421 }
2422
2423 /*
2424 * We have to add up either side to figure out how many extents were
2425 * accounted for before we merged into one big extent. If the number of
2426 * extents we accounted for is <= the amount we need for the new range
2427 * then we can return, otherwise drop. Think of it like this
2428 *
2429 * [ 4k][MAX_SIZE]
2430 *
2431 * So we've grown the extent by a MAX_SIZE extent, this would mean we
2432 * need 2 outstanding extents, on one side we have 1 and the other side
2433 * we have 1 so they are == and we can return. But in this case
2434 *
2435 * [MAX_SIZE+4k][MAX_SIZE+4k]
2436 *
2437 * Each range on their own accounts for 2 extents, but merged together
2438 * they are only 3 extents worth of accounting, so we need to drop in
2439 * this case.
2440 */
2441 old_size = other->end - other->start + 1;
2442 num_extents = count_max_extents(fs_info, old_size);
2443 old_size = new->end - new->start + 1;
2444 num_extents += count_max_extents(fs_info, old_size);
2445 if (count_max_extents(fs_info, new_size) >= num_extents)
2446 return;
2447
2448 spin_lock(&inode->lock);
2449 btrfs_mod_outstanding_extents(inode, -1);
2450 spin_unlock(&inode->lock);
2451 }
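/*
 * Worked example of the accounting above, assuming a 128M max_extent_size:
 * merging [MAX_SIZE+4K] with [MAX_SIZE+4K] gives an old accounting of
 * 2 + 2 = 4 outstanding extents, while the merged size of 2 * MAX_SIZE + 8K
 * only needs count_max_extents() = 3, so 3 < 4 and we drop one outstanding
 * extent. For [4K][MAX_SIZE] the sides account 1 + 1 = 2 and the merged
 * range also needs 2, so nothing is dropped there.
 */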
2452
2453 static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
2454 {
2455 struct btrfs_root *root = inode->root;
2456 struct btrfs_fs_info *fs_info = root->fs_info;
2457
2458 spin_lock(&root->delalloc_lock);
2459 ASSERT(list_empty(&inode->delalloc_inodes));
2460 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
2461 root->nr_delalloc_inodes++;
2462 if (root->nr_delalloc_inodes == 1) {
2463 spin_lock(&fs_info->delalloc_root_lock);
2464 ASSERT(list_empty(&root->delalloc_root));
2465 list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
2466 spin_unlock(&fs_info->delalloc_root_lock);
2467 }
2468 spin_unlock(&root->delalloc_lock);
2469 }
2470
2471 void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
2472 {
2473 struct btrfs_root *root = inode->root;
2474 struct btrfs_fs_info *fs_info = root->fs_info;
2475
2476 lockdep_assert_held(&root->delalloc_lock);
2477
2478 /*
2479 * We may be called after the inode was already deleted from the list,
2480 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
2481 * and then later through btrfs_clear_delalloc_extent() while the inode
2482 * still has ->delalloc_bytes > 0.
2483 */
2484 if (!list_empty(&inode->delalloc_inodes)) {
2485 list_del_init(&inode->delalloc_inodes);
2486 root->nr_delalloc_inodes--;
2487 if (!root->nr_delalloc_inodes) {
2488 ASSERT(list_empty(&root->delalloc_inodes));
2489 spin_lock(&fs_info->delalloc_root_lock);
2490 ASSERT(!list_empty(&root->delalloc_root));
2491 list_del_init(&root->delalloc_root);
2492 spin_unlock(&fs_info->delalloc_root_lock);
2493 }
2494 }
2495 }
2496
2497 /*
2498 * Properly track delayed allocation bytes in the inode and maintain the
2499 * list of inodes that have pending delalloc work to be done.
2500 */
2501 void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
2502 u32 bits)
2503 {
2504 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2505
2506 lockdep_assert_held(&inode->io_tree.lock);
2507
2508 if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
2509 WARN_ON(1);
2510 /*
2511 * set_bit and clear bit hooks normally require _irqsave/restore
2512 * but in this case, we are only testing for the DELALLOC
2513 * bit, which is only set or cleared with irqs on
2514 */
2515 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2516 u64 len = state->end + 1 - state->start;
2517 u64 prev_delalloc_bytes;
2518 u32 num_extents = count_max_extents(fs_info, len);
2519
2520 spin_lock(&inode->lock);
2521 btrfs_mod_outstanding_extents(inode, num_extents);
2522 spin_unlock(&inode->lock);
2523
2524 /* For sanity tests */
2525 if (btrfs_is_testing(fs_info))
2526 return;
2527
2528 percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
2529 fs_info->delalloc_batch);
2530 spin_lock(&inode->lock);
2531 prev_delalloc_bytes = inode->delalloc_bytes;
2532 inode->delalloc_bytes += len;
2533 if (bits & EXTENT_DEFRAG)
2534 inode->defrag_bytes += len;
2535 spin_unlock(&inode->lock);
2536
2537 /*
2538 * We don't need to be under the protection of the inode's lock,
2539 * because we are called while holding the inode's io_tree lock
2540 * and are therefore protected against concurrent calls of this
2541 * function and btrfs_clear_delalloc_extent().
2542 */
2543 if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
2544 btrfs_add_delalloc_inode(inode);
2545 }
2546
2547 if (!(state->state & EXTENT_DELALLOC_NEW) &&
2548 (bits & EXTENT_DELALLOC_NEW)) {
2549 spin_lock(&inode->lock);
2550 inode->new_delalloc_bytes += state->end + 1 - state->start;
2551 spin_unlock(&inode->lock);
2552 }
2553 }
2554
2555 /*
2556 * Once a range is no longer delalloc this function ensures that proper
2557 * accounting happens.
2558 */
2559 void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
2560 struct extent_state *state, u32 bits)
2561 {
2562 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2563 u64 len = state->end + 1 - state->start;
2564 u32 num_extents = count_max_extents(fs_info, len);
2565
2566 lockdep_assert_held(&inode->io_tree.lock);
2567
2568 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
2569 spin_lock(&inode->lock);
2570 inode->defrag_bytes -= len;
2571 spin_unlock(&inode->lock);
2572 }
2573
2574 /*
2575 * set_bit and clear bit hooks normally require _irqsave/restore
2576 * but in this case, we are only testing for the DELALLOC
2577 * bit, which is only set or cleared with irqs on
2578 */
2579 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
2580 struct btrfs_root *root = inode->root;
2581 u64 new_delalloc_bytes;
2582
2583 spin_lock(&inode->lock);
2584 btrfs_mod_outstanding_extents(inode, -num_extents);
2585 spin_unlock(&inode->lock);
2586
2587 /*
2588 * We don't reserve metadata space for space cache inodes so we
2589 * don't need to call delalloc_release_metadata if there is an
2590 * error.
2591 */
2592 if (bits & EXTENT_CLEAR_META_RESV &&
2593 root != fs_info->tree_root)
2594 btrfs_delalloc_release_metadata(inode, len, true);
2595
2596 /* For sanity tests. */
2597 if (btrfs_is_testing(fs_info))
2598 return;
2599
2600 if (!btrfs_is_data_reloc_root(root) &&
2601 !btrfs_is_free_space_inode(inode) &&
2602 !(state->state & EXTENT_NORESERVE) &&
2603 (bits & EXTENT_CLEAR_DATA_RESV))
2604 btrfs_free_reserved_data_space_noquota(fs_info, len);
2605
2606 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
2607 fs_info->delalloc_batch);
2608 spin_lock(&inode->lock);
2609 inode->delalloc_bytes -= len;
2610 new_delalloc_bytes = inode->delalloc_bytes;
2611 spin_unlock(&inode->lock);
2612
2613 /*
2614 * We don't need to be under the protection of the inode's lock,
2615 * because we are called while holding the inode's io_tree lock
2616 * and are therefore protected against concurrent calls of this
2617 * function and btrfs_set_delalloc_extent().
2618 */
2619 if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
2620 spin_lock(&root->delalloc_lock);
2621 btrfs_del_delalloc_inode(inode);
2622 spin_unlock(&root->delalloc_lock);
2623 }
2624 }
2625
2626 if ((state->state & EXTENT_DELALLOC_NEW) &&
2627 (bits & EXTENT_DELALLOC_NEW)) {
2628 spin_lock(&inode->lock);
2629 ASSERT(inode->new_delalloc_bytes >= len);
2630 inode->new_delalloc_bytes -= len;
2631 if (bits & EXTENT_ADD_INODE_BYTES)
2632 inode_add_bytes(&inode->vfs_inode, len);
2633 spin_unlock(&inode->lock);
2634 }
2635 }
2636
2637 /*
2638 * Given a list of ordered sums, record them in the inode. This happens
2639 * at IO completion time based on sums calculated at bio submission time.
2640 */
2641 static int add_pending_csums(struct btrfs_trans_handle *trans,
2642 struct list_head *list)
2643 {
2644 struct btrfs_ordered_sum *sum;
2645 struct btrfs_root *csum_root = NULL;
2646 int ret;
2647
2648 list_for_each_entry(sum, list, list) {
2649 trans->adding_csums = true;
2650 if (!csum_root)
2651 csum_root = btrfs_csum_root(trans->fs_info,
2652 sum->logical);
2653 ret = btrfs_csum_file_blocks(trans, csum_root, sum);
2654 trans->adding_csums = false;
2655 if (ret)
2656 return ret;
2657 }
2658 return 0;
2659 }
2660
2661 static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
2662 const u64 start,
2663 const u64 len,
2664 struct extent_state **cached_state)
2665 {
2666 u64 search_start = start;
2667 const u64 end = start + len - 1;
2668
2669 while (search_start < end) {
2670 const u64 search_len = end - search_start + 1;
2671 struct extent_map *em;
2672 u64 em_len;
2673 int ret = 0;
2674
2675 em = btrfs_get_extent(inode, NULL, search_start, search_len);
2676 if (IS_ERR(em))
2677 return PTR_ERR(em);
2678
2679 if (em->disk_bytenr != EXTENT_MAP_HOLE)
2680 goto next;
2681
2682 em_len = em->len;
2683 if (em->start < search_start)
2684 em_len -= search_start - em->start;
2685 if (em_len > search_len)
2686 em_len = search_len;
2687
2688 ret = set_extent_bit(&inode->io_tree, search_start,
2689 search_start + em_len - 1,
2690 EXTENT_DELALLOC_NEW, cached_state);
2691 next:
2692 search_start = extent_map_end(em);
2693 free_extent_map(em);
2694 if (ret)
2695 return ret;
2696 }
2697 return 0;
2698 }
2699
2700 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2701 unsigned int extra_bits,
2702 struct extent_state **cached_state)
2703 {
2704 WARN_ON(PAGE_ALIGNED(end));
2705
2706 if (start >= i_size_read(&inode->vfs_inode) &&
2707 !(inode->flags & BTRFS_INODE_PREALLOC)) {
2708 /*
2709 * There can't be any extents following eof in this case so just
2710 * set the delalloc new bit for the range directly.
2711 */
2712 extra_bits |= EXTENT_DELALLOC_NEW;
2713 } else {
2714 int ret;
2715
2716 ret = btrfs_find_new_delalloc_bytes(inode, start,
2717 end + 1 - start,
2718 cached_state);
2719 if (ret)
2720 return ret;
2721 }
2722
2723 return set_extent_bit(&inode->io_tree, start, end,
2724 EXTENT_DELALLOC | extra_bits, cached_state);
2725 }
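/*
 * Usage sketch (see btrfs_writepage_fixup_worker() below for a real caller):
 * with the extent range locked, a 4K page being made delalloc at page_start
 * would do
 *
 *	ret = btrfs_set_extent_delalloc(inode, page_start, page_start + 4095,
 *					0, &cached_state);
 *
 * which tags [page_start, page_start + 4095] with EXTENT_DELALLOC (plus
 * EXTENT_DELALLOC_NEW when the range starts at or beyond i_size on a
 * non-prealloc inode).
 */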
2726
2727 /* see btrfs_writepage_start_hook for details on why this is required */
2728 struct btrfs_writepage_fixup {
2729 struct folio *folio;
2730 struct btrfs_inode *inode;
2731 struct btrfs_work work;
2732 };
2733
2734 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
2735 {
2736 struct btrfs_writepage_fixup *fixup =
2737 container_of(work, struct btrfs_writepage_fixup, work);
2738 struct btrfs_ordered_extent *ordered;
2739 struct extent_state *cached_state = NULL;
2740 struct extent_changeset *data_reserved = NULL;
2741 struct folio *folio = fixup->folio;
2742 struct btrfs_inode *inode = fixup->inode;
2743 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2744 u64 page_start = folio_pos(folio);
2745 u64 page_end = folio_pos(folio) + folio_size(folio) - 1;
2746 int ret = 0;
2747 bool free_delalloc_space = true;
2748
2749 /*
2750 * This is similar to page_mkwrite, we need to reserve the space before
2751 * we take the folio lock.
2752 */
2753 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
2754 folio_size(folio));
2755 again:
2756 folio_lock(folio);
2757
2758 /*
2759 * Before we queued this fixup, we took a reference on the folio.
2760 * folio->mapping may go NULL, but it shouldn't be moved to a different
2761 * address space.
2762 */
2763 if (!folio->mapping || !folio_test_dirty(folio) ||
2764 !folio_test_checked(folio)) {
2765 /*
2766 * Unfortunately this is a little tricky, either
2767 *
2768 * 1) We got here and our folio had already been dealt with and
2769 * we reserved our space, thus ret == 0, so we need to just
2770 * drop our space reservation and bail. This can happen the
2771 * first time we come into the fixup worker, or could happen
2772 * while waiting for the ordered extent.
2773 * 2) Our folio was already dealt with, but we happened to get an
2774 * ENOSPC above from the btrfs_delalloc_reserve_space. In
2775 * this case we obviously don't have anything to release, but
2776 * because the folio was already dealt with we don't want to
2777 * mark the folio with an error, so make sure we're resetting
2778 * ret to 0. This is why we have this check _before_ the ret
2779 * check, because we do not want to have a surprise ENOSPC
2780 * when the folio was already properly dealt with.
2781 */
2782 if (!ret) {
2783 btrfs_delalloc_release_extents(inode, folio_size(folio));
2784 btrfs_delalloc_release_space(inode, data_reserved,
2785 page_start, folio_size(folio),
2786 true);
2787 }
2788 ret = 0;
2789 goto out_page;
2790 }
2791
2792 /*
2793 * We can't mess with the folio state unless it is locked, so now that
2794 * it is locked bail if we failed to make our space reservation.
2795 */
2796 if (ret)
2797 goto out_page;
2798
2799 lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2800
2801 /* already ordered? We're done */
2802 if (folio_test_ordered(folio))
2803 goto out_reserved;
2804
2805 ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
2806 if (ordered) {
2807 unlock_extent(&inode->io_tree, page_start, page_end,
2808 &cached_state);
2809 folio_unlock(folio);
2810 btrfs_start_ordered_extent(ordered);
2811 btrfs_put_ordered_extent(ordered);
2812 goto again;
2813 }
2814
2815 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
2816 &cached_state);
2817 if (ret)
2818 goto out_reserved;
2819
2820 /*
2821 * Everything went as planned, we're now the owner of a dirty page with
2822 * delayed allocation bits set and space reserved for our COW
2823 * destination.
2824 *
2825 * The page was dirty when we started, nothing should have cleaned it.
2826 */
2827 BUG_ON(!folio_test_dirty(folio));
2828 free_delalloc_space = false;
2829 out_reserved:
2830 btrfs_delalloc_release_extents(inode, PAGE_SIZE);
2831 if (free_delalloc_space)
2832 btrfs_delalloc_release_space(inode, data_reserved, page_start,
2833 PAGE_SIZE, true);
2834 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
2835 out_page:
2836 if (ret) {
2837 /*
2838 * We hit ENOSPC or other errors. Update the mapping and folio
2839 * to reflect the error and clean the folio.
2840 */
2841 mapping_set_error(folio->mapping, ret);
2842 btrfs_mark_ordered_io_finished(inode, folio, page_start,
2843 folio_size(folio), !ret);
2844 folio_clear_dirty_for_io(folio);
2845 }
2846 btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
2847 folio_unlock(folio);
2848 folio_put(folio);
2849 kfree(fixup);
2850 extent_changeset_free(data_reserved);
2851 /*
2852 * As a precaution, do a delayed iput in case it would be the last iput
2853 * that could need flushing space. Recursing back to fixup worker would
2854 * deadlock.
2855 */
2856 btrfs_add_delayed_iput(inode);
2857 }
2858
2859 /*
2860 * There are a few paths in the higher layers of the kernel that directly
2861 * set the folio dirty bit without asking the filesystem if it is a
2862 * good idea. This causes problems because we want to make sure COW
2863 * properly happens and the data=ordered rules are followed.
2864 *
2865 * In our case any range that doesn't have the ORDERED bit set
2866 * hasn't been properly setup for IO. We kick off an async process
2867 * to fix it up. The async helper will wait for ordered extents, set
2868 * the delalloc bit and make it safe to write the folio.
2869 */
2870 int btrfs_writepage_cow_fixup(struct folio *folio)
2871 {
2872 struct inode *inode = folio->mapping->host;
2873 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2874 struct btrfs_writepage_fixup *fixup;
2875
2876 /* This folio has an ordered extent covering it already */
2877 if (folio_test_ordered(folio))
2878 return 0;
2879
2880 /*
2881 * folio_checked is set below when we create a fixup worker for this
2882 * folio, don't try to create another one if we're already
2883 * folio_test_checked.
2884 *
2885 * The extent_io writepage code will redirty the folio if we send back
2886 * EAGAIN.
2887 */
2888 if (folio_test_checked(folio))
2889 return -EAGAIN;
2890
2891 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2892 if (!fixup)
2893 return -EAGAIN;
2894
2895 /*
2896 * We are already holding a reference to this inode from
2897 * write_cache_pages. We need to hold it because the space reservation
2898 * takes place outside of the folio lock, and we can't trust
2899 * page->mapping outside of the folio lock.
2900 */
2901 ihold(inode);
2902 btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
2903 folio_get(folio);
2904 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
2905 fixup->folio = folio;
2906 fixup->inode = BTRFS_I(inode);
2907 btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
2908
2909 return -EAGAIN;
2910 }
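/*
 * Lifecycle sketch of the fixup mechanism above: writepage finds a dirty
 * folio with neither the ordered nor the checked flag set and calls
 * btrfs_writepage_cow_fixup(), which sets the checked flag, queues
 * btrfs_writepage_fixup_worker() and returns -EAGAIN so the folio gets
 * redirtied. The worker then reserves delalloc space, waits out any
 * conflicting ordered extent and marks the range delalloc, after which a
 * later writeback pass can write the folio out normally.
 */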
2911
2912 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2913 struct btrfs_inode *inode, u64 file_pos,
2914 struct btrfs_file_extent_item *stack_fi,
2915 const bool update_inode_bytes,
2916 u64 qgroup_reserved)
2917 {
2918 struct btrfs_root *root = inode->root;
2919 const u64 sectorsize = root->fs_info->sectorsize;
2920 struct btrfs_path *path;
2921 struct extent_buffer *leaf;
2922 struct btrfs_key ins;
2923 u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
2924 u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
2925 u64 offset = btrfs_stack_file_extent_offset(stack_fi);
2926 u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
2927 u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
2928 struct btrfs_drop_extents_args drop_args = { 0 };
2929 int ret;
2930
2931 path = btrfs_alloc_path();
2932 if (!path)
2933 return -ENOMEM;
2934
2935 /*
2936 * We may be replacing one extent in the tree with another.
2937 * The new extent is pinned in the extent map, and we don't want
2938 * to drop it from the cache until it is completely in the btree.
2939 *
2940 * So, tell btrfs_drop_extents to leave this extent in the cache.
2941 * The caller is expected to unpin it and allow it to be merged
2942 * with the others.
2943 */
2944 drop_args.path = path;
2945 drop_args.start = file_pos;
2946 drop_args.end = file_pos + num_bytes;
2947 drop_args.replace_extent = true;
2948 drop_args.extent_item_size = sizeof(*stack_fi);
2949 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2950 if (ret)
2951 goto out;
2952
2953 if (!drop_args.extent_inserted) {
2954 ins.objectid = btrfs_ino(inode);
2955 ins.offset = file_pos;
2956 ins.type = BTRFS_EXTENT_DATA_KEY;
2957
2958 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2959 sizeof(*stack_fi));
2960 if (ret)
2961 goto out;
2962 }
2963 leaf = path->nodes[0];
2964 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
2965 write_extent_buffer(leaf, stack_fi,
2966 btrfs_item_ptr_offset(leaf, path->slots[0]),
2967 sizeof(struct btrfs_file_extent_item));
2968
2969 btrfs_release_path(path);
2970
2971 /*
2972 * If we dropped an inline extent here, we know the range it occupied
2973 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
2974 * number of bytes only for that range containing the inline extent.
2975 * The remainder of the range will be processed when clearing the
2976 * EXTENT_DELALLOC bit through the ordered extent completion.
2977 */
2978 if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
2979 u64 inline_size = round_down(drop_args.bytes_found, sectorsize);
2980
2981 inline_size = drop_args.bytes_found - inline_size;
2982 btrfs_update_inode_bytes(inode, sectorsize, inline_size);
2983 drop_args.bytes_found -= inline_size;
2984 num_bytes -= sectorsize;
2985 }
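	/*
	 * Worked example of the inline adjustment above, with an assumed 4K
	 * sector size: replacing a 3000 byte inline extent (file_pos == 0,
	 * drop_args.bytes_found == 3000) gives inline_size = 3000 - 0 = 3000,
	 * so we add one sector (4096) and subtract 3000 from the inode's
	 * bytes, and exclude one sector from the num_bytes accounted below.
	 */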
2986
2987 if (update_inode_bytes)
2988 btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);
2989
2990 ins.objectid = disk_bytenr;
2991 ins.offset = disk_num_bytes;
2992 ins.type = BTRFS_EXTENT_ITEM_KEY;
2993
2994 ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
2995 if (ret)
2996 goto out;
2997
2998 ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
2999 file_pos - offset,
3000 qgroup_reserved, &ins);
3001 out:
3002 btrfs_free_path(path);
3003
3004 return ret;
3005 }
3006
3007 static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
3008 u64 start, u64 len)
3009 {
3010 struct btrfs_block_group *cache;
3011
3012 cache = btrfs_lookup_block_group(fs_info, start);
3013 ASSERT(cache);
3014
3015 spin_lock(&cache->lock);
3016 cache->delalloc_bytes -= len;
3017 spin_unlock(&cache->lock);
3018
3019 btrfs_put_block_group(cache);
3020 }
3021
3022 static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
3023 struct btrfs_ordered_extent *oe)
3024 {
3025 struct btrfs_file_extent_item stack_fi;
3026 bool update_inode_bytes;
3027 u64 num_bytes = oe->num_bytes;
3028 u64 ram_bytes = oe->ram_bytes;
3029
3030 memset(&stack_fi, 0, sizeof(stack_fi));
3031 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
3032 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
3033 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
3034 oe->disk_num_bytes);
3035 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
3036 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
3037 num_bytes = oe->truncated_len;
3038 btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
3039 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
3040 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
3041 /* Encryption and other encoding is reserved and all 0 */
3042
3043 /*
3044 * For delalloc, when completing an ordered extent we update the inode's
3045 * bytes when clearing the range in the inode's io tree, so pass false
3046 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
3047 * except if the ordered extent was truncated.
3048 */
3049 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
3050 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
3051 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);
3052
3053 return insert_reserved_file_extent(trans, oe->inode,
3054 oe->file_offset, &stack_fi,
3055 update_inode_bytes, oe->qgroup_rsv);
3056 }
3057
3058 /*
3059 * As ordered data IO finishes, this gets called so we can finish
3060 * an ordered extent once the range of bytes in the file it covers has
3061 * been fully written.
3062 */
3063 int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
3064 {
3065 struct btrfs_inode *inode = ordered_extent->inode;
3066 struct btrfs_root *root = inode->root;
3067 struct btrfs_fs_info *fs_info = root->fs_info;
3068 struct btrfs_trans_handle *trans = NULL;
3069 struct extent_io_tree *io_tree = &inode->io_tree;
3070 struct extent_state *cached_state = NULL;
3071 u64 start, end;
3072 int compress_type = 0;
3073 int ret = 0;
3074 u64 logical_len = ordered_extent->num_bytes;
3075 bool freespace_inode;
3076 bool truncated = false;
3077 bool clear_reserved_extent = true;
3078 unsigned int clear_bits = EXTENT_DEFRAG;
3079
3080 start = ordered_extent->file_offset;
3081 end = start + ordered_extent->num_bytes - 1;
3082
3083 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3084 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
3085 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
3086 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
3087 clear_bits |= EXTENT_DELALLOC_NEW;
3088
3089 freespace_inode = btrfs_is_free_space_inode(inode);
3090 if (!freespace_inode)
3091 btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
3092
3093 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
3094 ret = -EIO;
3095 goto out;
3096 }
3097
3098 if (btrfs_is_zoned(fs_info))
3099 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
3100 ordered_extent->disk_num_bytes);
3101
3102 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
3103 truncated = true;
3104 logical_len = ordered_extent->truncated_len;
3105 /* Truncated the entire extent, don't bother adding */
3106 if (!logical_len)
3107 goto out;
3108 }
3109
3110 /*
3111 * If it's a COW write we need to lock the extent range as we will be
3112 * inserting/replacing file extent items and unpinning an extent map.
3113 * This must be taken before joining a transaction, as it's a higher
3114 * level lock (like the inode's VFS lock), otherwise we can run into an
3115 * ABBA deadlock with other tasks (transactions work like a lock,
3116 * depending on their current state).
3117 */
3118 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3119 clear_bits |= EXTENT_LOCKED;
3120 lock_extent(io_tree, start, end, &cached_state);
3121 }
3122
3123 if (freespace_inode)
3124 trans = btrfs_join_transaction_spacecache(root);
3125 else
3126 trans = btrfs_join_transaction(root);
3127 if (IS_ERR(trans)) {
3128 ret = PTR_ERR(trans);
3129 trans = NULL;
3130 goto out;
3131 }
3132
3133 trans->block_rsv = &inode->block_rsv;
3134
3135 ret = btrfs_insert_raid_extent(trans, ordered_extent);
3136 if (ret) {
3137 btrfs_abort_transaction(trans, ret);
3138 goto out;
3139 }
3140
3141 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
3142 /* Logic error */
3143 ASSERT(list_empty(&ordered_extent->list));
3144 if (!list_empty(&ordered_extent->list)) {
3145 ret = -EINVAL;
3146 btrfs_abort_transaction(trans, ret);
3147 goto out;
3148 }
3149
3150 btrfs_inode_safe_disk_i_size_write(inode, 0);
3151 ret = btrfs_update_inode_fallback(trans, inode);
3152 if (ret) {
3153 /* -ENOMEM or corruption */
3154 btrfs_abort_transaction(trans, ret);
3155 }
3156 goto out;
3157 }
3158
3159 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
3160 compress_type = ordered_extent->compress_type;
3161 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3162 BUG_ON(compress_type);
3163 ret = btrfs_mark_extent_written(trans, inode,
3164 ordered_extent->file_offset,
3165 ordered_extent->file_offset +
3166 logical_len);
3167 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
3168 ordered_extent->disk_num_bytes);
3169 } else {
3170 BUG_ON(root == fs_info->tree_root);
3171 ret = insert_ordered_extent_file_extent(trans, ordered_extent);
3172 if (!ret) {
3173 clear_reserved_extent = false;
3174 btrfs_release_delalloc_bytes(fs_info,
3175 ordered_extent->disk_bytenr,
3176 ordered_extent->disk_num_bytes);
3177 }
3178 }
3179 if (ret < 0) {
3180 btrfs_abort_transaction(trans, ret);
3181 goto out;
3182 }
3183
	ret = unpin_extent_cache(inode, ordered_extent->file_offset,
				 ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done before
	 * updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set. Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the ones
		 * marking this ordered extent as failed.
		 */
		if (ret)
			btrfs_mark_ordered_extent_error(ordered_extent);

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/*
		 * Drop extent maps for the part of the extent we didn't write.
		 *
		 * We have an exception here for the free_space_inode, this is
		 * because when we do btrfs_get_extent() on the free space inode
		 * we will search the commit root. If this is a new block group
		 * we won't find anything, and we will trip over the assert in
		 * writepage where we do ASSERT(em->block_start !=
		 * EXTENT_MAP_HOLE).
		 *
		 * Theoretically we could also skip this for any NOCOW extent as
		 * we don't mess with the extent map tree in the NOCOW case, but
		 * for now simply skip this if we are the free space inode.
		 */
		if (!btrfs_is_free_space_inode(inode))
			btrfs_drop_extent_map_range(inode, unwritten_start,
						    end, false);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator. We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						     ordered_extent->disk_bytenr,
						     ordered_extent->disk_num_bytes,
						     NULL);
			btrfs_free_reserved_extent(fs_info,
						   ordered_extent->disk_bytenr,
						   ordered_extent->disk_num_bytes, 1);
			/*
			 * Actually free the qgroup rsv which was released when
			 * the ordered extent was created.
			 */
			btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
						  ordered_extent->qgroup_rsv,
						  BTRFS_QGROUP_RSV_DATA);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}

int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
	if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
	    list_empty(&ordered->bioc_list))
		btrfs_finish_ordered_zoned(ordered);
	return btrfs_finish_one_ordered(ordered);
}

/*
 * Verify the checksum for a single sector without any extra action that
 * depends on the type of I/O.
 */
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
{
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;

	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);

	shash->tfm = fs_info->csum_shash;

	kaddr = kmap_local_page(page) + pgoff;
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
	kunmap_local(kaddr);

	if (memcmp(csum, csum_expected, fs_info->csum_size))
		return -EIO;
	return 0;
}

/*
 * Verify the checksum of a single data sector.
 *
 * @bbio:	btrfs_io_bio which contains the csum
 * @dev:	device the sector is on
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @bv:		bio_vec to check
 *
 * Check if the checksum on a data block is valid. When a checksum mismatch is
 * detected, report the error and fill the corrupted range with zero.
 *
 * Return %true if the sector is ok or had no checksum to start with, else %false.
 */
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, struct bio_vec *bv)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 file_offset = bbio->file_offset + bio_offset;
	u64 end = file_offset + bv->bv_len - 1;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(bv->bv_len == fs_info->sectorsize);

	if (!bbio->csum)
		return true;

	if (btrfs_is_data_reloc_root(inode->root) &&
	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
			   NULL)) {
		/* Skip the range without csum for data reloc inode */
		clear_extent_bits(&inode->io_tree, file_offset, end,
				  EXTENT_NODATASUM);
		return true;
	}

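	/*
	 * Checksums are stored in bbio->csum sequentially, one per sector in
	 * bio order; index them by the sector offset of this bvec within the
	 * bio.
	 */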
	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
			fs_info->csum_size;
	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
				    csum_expected))
		goto zeroit;
	return true;

zeroit:
	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
				    bbio->mirror_num);
	if (dev)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memzero_bvec(bv);
	return false;
}

/*
 * Perform a delayed iput on @inode.
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or, if this is the last iput, link the
 * inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time, at superblock commit, or by the cleaner kthread.
 */
void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long flags;

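	/*
	 * Drop a reference unless it is the last one. If i_count is 1 we are
	 * responsible for the final iput, which must be deferred since we can
	 * be running in contexts where a synchronous iput() is not safe.
	 */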
	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	/*
	 * Need to be irq safe here because we can be called from either an irq
	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
	 * context.
	 */
	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
	ASSERT(list_empty(&inode->delayed_iput));
	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}

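/*
 * Run one delayed iput. Called and returns with delayed_iput_lock held, but
 * the lock is dropped around the iput() call, since the final iput can block
 * and take other locks.
 */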
static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock_irq(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock_irq(&fs_info->delayed_iput_lock);
}

static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock_irq(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock_irq(&fs_info->delayed_iput_lock);
	}
}

void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	/*
	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
	 * calls btrfs_add_delayed_iput() and that needs to lock
	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
	 * prevent a deadlock.
	 */
	spin_lock_irq(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
					 struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
		if (need_resched()) {
			spin_unlock_irq(&fs_info->delayed_iput_lock);
			cond_resched();
			spin_lock_irq(&fs_info->delayed_iput_lock);
		}
	}
	spin_unlock_irq(&fs_info->delayed_iput_lock);
}

/*
 * Wait for flushing all delayed iputs
 *
 * @fs_info: the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set. Once they are all done running we will return, unless we are killed in
 * which case we return EINTR. This helps in user operations like fallocate
 * etc. that might get blocked on the iputs.
 *
 * Return EINTR if we were killed, 0 if nothing's pending
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
			atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}

/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}

/*
 * This cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

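	/*
	 * Orphan items all live under the BTRFS_ORPHAN_OBJECTID key, with the
	 * inode number stored in the key offset. Start the search from the
	 * highest possible offset and process the items backwards.
	 */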
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * ret == 0 means we found what we were searching for, which
		 * is weird, but possible. So only adjust the path if we didn't
		 * find the key, and check whether the previous item matches.
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * This is where we are basically doing btrfs_lookup, without
		 * the crossing root thing. We store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			/*
			 * We found the same inode as before. This means we were
			 * not able to remove its items via eviction triggered
			 * by an iput(). A transaction abort may have happened,
			 * due to -ENOSPC for example, so try to grab the error
			 * that led to the transaction abort, if any.
			 */
			btrfs_err(fs_info,
				  "Error removing orphan entry, stopping orphan cleanup");
			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(last_objectid, root);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			inode = NULL;
			if (ret != -ENOENT)
				goto out;
		}

		if (!inode && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
						      (unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}

		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 * file. In that case, the orphan item represents incomplete
		 * fsverity metadata which must be cleaned up with
		 * btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 * orphan item for truncate indicating that there were possibly
		 * extent items past i_size that needed to be deleted. In v3.12,
		 * truncate was changed to update i_size in sync with the extent
		 * items, but the (useless) orphan item was still created. Since
		 * v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (!inode || inode->i_nlink) {
			if (inode) {
				ret = btrfs_drop_verity_items(BTRFS_I(inode));
				iput(inode);
				inode = NULL;
				if (ret)
					goto out;
			}
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}

/*
 * Very simple check to peek ahead in the leaf looking for xattrs. If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

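	/*
	 * Lazily compute the name hashes of the two POSIX ACL xattrs once;
	 * xattr items store the hash of their name in the key offset, which
	 * is what found_key.offset is compared against below.
	 */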
	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					       strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
						strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs. Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/*
	 * we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr. We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}

static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (WARN_ON_ONCE(inode->file_extent_tree))
		return 0;
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;
	if (!S_ISREG(inode->vfs_inode.i_mode))
		return 0;
	if (btrfs_is_free_space_inode(inode))
		return 0;

	inode->file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
	if (!inode->file_extent_tree)
		return -ENOMEM;

	extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT);
	/* Lockdep class is set only for the file extent tree. */
	lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);

	return 0;
}

static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *existing;
	const u64 ino = btrfs_ino(inode);
	int ret;

	if (inode_unhashed(&inode->vfs_inode))
		return 0;

	if (prealloc) {
		ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
		if (ret)
			return ret;
	}

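	/*
	 * If a slot was reserved above, storing into it does not need to
	 * allocate. The assertions below sanity check the error cases we
	 * never expect to see here.
	 */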
	existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);

	if (xa_is_err(existing)) {
		ret = xa_err(existing);
		ASSERT(ret != -EINVAL);
		ASSERT(ret != -ENOMEM);
		return ret;
	} else if (existing) {
		WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING)));
	}

	return 0;
}

/*
 * Read a locked inode from the btree into the in-memory inode and add it to
 * its root list/tree.
 *
 * On failure clean up the inode.
 */
static int btrfs_read_locked_inode(struct inode *inode, struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
	if (ret)
		goto out;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	ASSERT(path);

	btrfs_get_inode_key(BTRFS_I(inode), &location);

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		/*
		 * ret > 0 can come from btrfs_search_slot called by
		 * btrfs_lookup_inode(), this means the inode was not found.
		 */
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));

	inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
			btrfs_timespec_nsec(leaf, &inode_item->atime));

	inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
			btrfs_timespec_nsec(leaf, &inode_item->mtime));

	inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
			btrfs_timespec_nsec(leaf, &inode_item->ctime));

	BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_inode_sequence(leaf, inode_item));
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	if (S_ISDIR(inode->i_mode))
		BTRFS_I(inode)->index_cnt = (u64)-1;

	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * cache.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in the delayed_nodes xarray.
	 */
	if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(BTRFS_I(inode)),
				  btrfs_root_id(root), ret);
	}

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);

	ret = btrfs_add_inode_to_root(BTRFS_I(inode), true);
	if (ret)
		goto out;

	return 0;
out:
	iget_failed(inode);
	return ret;
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;
	u64 flags;

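	/*
	 * A map token caches the mapped address within the extent buffer so
	 * the many adjacent field writes below avoid repeated page lookups.
	 */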
	btrfs_init_map_token(&token, leaf);

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode_get_atime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode_get_atime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode_get_mtime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode_get_mtime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode_get_ctime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode_get_ctime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
	btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
	btrfs_set_token_inode_generation(&token, item,
					 BTRFS_I(inode)->generation);
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_token_inode_flags(&token, item, flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}

/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
					    struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_get_inode_key(inode, &key);
	ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

/*
 * copy everything in the in-memory inode into the btree.
 */
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay.
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && !btrfs_is_data_reloc_root(root)
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, inode);
}

int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, inode);
	return ret;
}

/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code. It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory.
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const struct fscrypt_str *name,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have the dir index cached, we have to look up the inode
	 * ref to get it, and since we will have found the ref we remove it
	 * directly, as there is no point in a delayed deletion.
	 *
	 * But if we do have the dir index cached, there is no need to search
	 * for the inode ref. Since the inode ref is close to the inode item,
	 * it is better to delete it later, when we update the inode item with
	 * the delayed inode code.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			   "failed to delete reference to %.*s, inode %llu parent %llu",
			   name->len, name->name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context. If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks. Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

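	/*
	 * Each name is accounted twice in the directory's i_size: once for
	 * the dir item and once for the dir index item, hence "len * 2".
	 */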
	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_set_ctime_current(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
	ret = btrfs_update_inode(trans, dir);
out:
	return ret;
}

int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct fscrypt_str *name)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, inode);
	}
	return ret;
}

/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try to see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
{
	struct btrfs_root *root = dir->root;

	return btrfs_start_transaction_fallback_global_rsv(root,
							   BTRFS_UNLINK_METADATA_UNITS);
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fscrypt_free;
	}

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				false);

	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (ret)
		goto end_trans;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto end_trans;
	}

end_trans:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
fscrypt_free:
	fscrypt_free_filename(&fname);
	return ret;
}

static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(dir);
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = btrfs_root_id(inode->root);
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->ref_root_id;
	} else {
		WARN_ON(1);
		fscrypt_free_filename(&fname);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   &fname.disk_name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation. In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it. Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 btrfs_root_id(root), dir_ino,
					 &index, &fname.disk_name);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
	inode_inc_iversion(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
	ret = btrfs_update_inode_fallback(trans, dir);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return ret;
}

/*
 * Helper to check if the subvolume references other subvolumes or if it's
 * the default subvolume.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct fscrypt_str name = FSTR_INIT("default", 7);
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, &name, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == btrfs_root_id(root)) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

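	/*
	 * A subvolume that is still referenced by other subvolumes has
	 * ROOT_REF items in the tree root. Search from the largest possible
	 * offset and step back to the last existing ref, if any.
	 */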
	key.objectid = btrfs_root_id(root);
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist a root
		 * with such id, but this is out of valid range.
		 */
		ret = -EUCLEAN;
		goto out;
	}

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}

/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_inode *inode;
	u64 min_ino = 0;

	if (!BTRFS_FS_ERROR(fs_info))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	inode = btrfs_find_first_inode(root, min_ino);
	while (inode) {
		if (atomic_read(&inode->vfs_inode.i_count) > 1)
			d_prune_aliases(&inode->vfs_inode);

		min_ino = btrfs_ino(inode) + 1;
		/*
		 * btrfs_drop_inode() will have it removed from the inode
		 * cache when its usage count hits zero.
		 */
		iput(&inode->vfs_inode);
		cond_resched();
		inode = btrfs_find_first_inode(root, min_ino);
	}
}

int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	u64 qgroup_reserved = 0;
	int ret;

	down_write(&fs_info->subvol_sem);

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the inode lock so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   btrfs_root_id(dest));
		ret = -EPERM;
		goto out_up_write;
	}
	if (atomic_read(&dest->nr_swapfiles)) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu with active swapfile",
			   btrfs_root_id(root));
		ret = -EPERM;
		goto out_up_write;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_undead;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_undead;
	qgroup_reserved = block_rsv.qgroup_rsv_reserved;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
	qgroup_reserved = 0;
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, dir);

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					       fs_info->tree_root,
					       btrfs_root_id(dest));
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				     BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					     dest->root_item.received_uuid,
					     BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					     btrfs_root_id(dest));
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
	if (qgroup_reserved)
		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
out_undead:
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	}
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (!ret) {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	int ret = 0;
	struct btrfs_trans_handle *trans;
	u64 last_unlink_trans;
	struct fscrypt_name fname;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
			btrfs_err(fs_info,
				  "extent tree v2 doesn't support snapshot deletion yet");
			return -EOPNOTSUPP;
		}
		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
	}

	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
		goto out;
	}

	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (ret)
		goto out;

	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;

	/* now the directory is empty */
	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (!ret) {
		btrfs_i_size_write(BTRFS_I(inode), 0);
		/*
		 * Propagate the last_unlink_trans value of the deleted dir to
		 * its parent directory. This is to prevent an unrecoverable
		 * log tree in the case we do something like this:
		 * 1) create dir foo
		 * 2) create snapshot under dir foo
		 * 3) delete the snapshot
		 * 4) rmdir foo
		 * 5) mkdir foo
		 * 6) fsync foo or some file inside foo
		 */
		if (last_unlink_trans >= trans->transid)
			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
	}
out:
	btrfs_end_transaction(trans);
out_notrans:
	btrfs_btree_balance_dirty(fs_info);
	fscrypt_free_filename(&fname);

	return ret;
}

/*
 * Read, zero a chunk and write a block.
 *
 * @inode:	inode that we're zeroing
 * @from:	the offset to start zeroing
 * @len:	the length to zero, 0 to zero the entire range relative to the
 *		offset
 * @front:	zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset and cow the block and zero
 * the part we want to zero. This is used with truncate and hole punching.
 */
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	bool only_release_metadata = false;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (blocksize - 1);
	struct folio *folio;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	size_t write_bytes = blocksize;
	int ret = 0;
	u64 block_start;
	u64 block_end;

	if (IS_ALIGNED(offset, blocksize) &&
	    (!len || IS_ALIGNED(len, blocksize)))
		goto out;

	block_start = round_down(from, blocksize);
	block_end = block_start + blocksize - 1;

	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
					  blocksize, false);
	if (ret < 0) {
		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
			/* For nocow case, no need to reserve data space */
			only_release_metadata = true;
		} else {
			goto out;
		}
	}
	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
	if (ret < 0) {
		if (!only_release_metadata)
			btrfs_free_reserved_data_space(inode, data_reserved,
						       block_start, blocksize);
		goto out;
	}
again:
	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
	if (IS_ERR(folio)) {
		btrfs_delalloc_release_space(inode, data_reserved, block_start,
					     blocksize, true);
		btrfs_delalloc_release_extents(inode, blocksize);
		ret = -ENOMEM;
		goto out;
	}

	if (!folio_test_uptodate(folio)) {
		ret = btrfs_read_folio(NULL, folio);
		folio_lock(folio);
		if (folio->mapping != mapping) {
			folio_unlock(folio);
			folio_put(folio);
			goto again;
		}
		if (!folio_test_uptodate(folio)) {
			ret = -EIO;
			goto out_unlock;
		}
	}

	/*
	 * We unlock the page after the io is completed and then re-lock it
	 * above. release_folio() could have come in between that and cleared
	 * folio private, but left the page in the mapping. Set the page mapped
	 * here to make sure it's properly set for the subpage stuff.
	 */
	ret = set_folio_extent_mapped(folio);
	if (ret < 0)
		goto out_unlock;

	folio_wait_writeback(folio);

	lock_extent(io_tree, block_start, block_end, &cached_state);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		folio_unlock(folio);
		folio_put(folio);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&inode->io_tree, block_start, block_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 &cached_state);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
					&cached_state);
	if (ret) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		goto out_unlock;
	}

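	/*
	 * Zero either the head of the block, [block_start, from), when @front
	 * is set, or the tail, [from, from + len), otherwise.
	 */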
4880 if (offset != blocksize) {
4881 if (!len)
4882 len = blocksize - offset;
4883 if (front)
4884 folio_zero_range(folio, block_start - folio_pos(folio),
4885 offset);
4886 else
4887 folio_zero_range(folio,
4888 (block_start - folio_pos(folio)) + offset,
4889 len);
4890 }
4891 btrfs_folio_clear_checked(fs_info, folio, block_start,
4892 block_end + 1 - block_start);
4893 btrfs_folio_set_dirty(fs_info, folio, block_start,
4894 block_end + 1 - block_start);
4895 unlock_extent(io_tree, block_start, block_end, &cached_state);
4896
4897 if (only_release_metadata)
4898 set_extent_bit(&inode->io_tree, block_start, block_end,
4899 EXTENT_NORESERVE, NULL);
4900
4901 out_unlock:
4902 if (ret) {
4903 if (only_release_metadata)
4904 btrfs_delalloc_release_metadata(inode, blocksize, true);
4905 else
4906 btrfs_delalloc_release_space(inode, data_reserved,
4907 block_start, blocksize, true);
4908 }
4909 btrfs_delalloc_release_extents(inode, blocksize);
4910 folio_unlock(folio);
4911 folio_put(folio);
4912 out:
4913 if (only_release_metadata)
4914 btrfs_check_nocow_unlock(inode);
4915 extent_changeset_free(data_reserved);
4916 return ret;
4917 }
4918
maybe_insert_hole(struct btrfs_inode * inode,u64 offset,u64 len)4919 static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
4920 {
4921 struct btrfs_root *root = inode->root;
4922 struct btrfs_fs_info *fs_info = root->fs_info;
4923 struct btrfs_trans_handle *trans;
4924 struct btrfs_drop_extents_args drop_args = { 0 };
4925 int ret;
4926
4927 /*
4928 * If NO_HOLES is enabled, we don't need to do anything.
4929 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
4930 * or btrfs_update_inode() will be called, which guarantee that the next
4931 * fsync will know this inode was changed and needs to be logged.
4932 */
4933 if (btrfs_fs_incompat(fs_info, NO_HOLES))
4934 return 0;
4935
4936 /*
4937 * 1 - for the one we're dropping
4938 * 1 - for the one we're adding
4939 * 1 - for updating the inode.
4940 */
4941 trans = btrfs_start_transaction(root, 3);
4942 if (IS_ERR(trans))
4943 return PTR_ERR(trans);
4944
4945 drop_args.start = offset;
4946 drop_args.end = offset + len;
4947 drop_args.drop_cache = true;
4948
4949 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
4950 if (ret) {
4951 btrfs_abort_transaction(trans, ret);
4952 btrfs_end_transaction(trans);
4953 return ret;
4954 }
4955
4956 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
4957 if (ret) {
4958 btrfs_abort_transaction(trans, ret);
4959 } else {
4960 btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
4961 btrfs_update_inode(trans, inode);
4962 }
4963 btrfs_end_transaction(trans);
4964 return ret;
4965 }
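
/*
 * Example (illustrative): truncating a file from 4KiB up to 1MiB on a
 * filesystem without the NO_HOLES feature makes this insert one hole file
 * extent item (disk_bytenr 0) covering [4KiB, 1MiB), so later lookups see
 * an explicit hole instead of a gap between items.
 */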

/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for. So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
 * for the range between oldsize and size.
 */
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int ret = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	ret = btrfs_truncate_block(inode, oldsize, 0, 0);
	if (ret)
		return ret;

	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
					   &cached_state);
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		hole_size = last_byte - cur_offset;

		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
			struct extent_map *hole_em;

			ret = maybe_insert_hole(inode, cur_offset, hole_size);
			if (ret)
				break;

			ret = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (ret)
				break;

			hole_em = alloc_extent_map();
			if (!hole_em) {
				btrfs_drop_extent_map_range(inode, cur_offset,
						cur_offset + hole_size - 1,
						false);
				btrfs_set_inode_full_sync(inode);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;

			hole_em->disk_bytenr = EXTENT_MAP_HOLE;
			hole_em->disk_num_bytes = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->generation = btrfs_get_fs_generation(fs_info);

			ret = btrfs_replace_extent_map_range(inode, hole_em, true);
			free_extent_map(hole_em);
		} else {
			ret = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (ret)
				break;
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
	return ret;
}
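
/*
 * Illustrative walk: with a 4KiB sectorsize, expanding from oldsize 6000
 * to size 40960 first zeroes bytes [6000, 8192) of the tail block, then
 * walks [8192, 40960) one extent map at a time, inserting hole extents
 * only where no preallocated extent already covers the range.
 */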

static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set. For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
			inode_set_mtime_to_ts(inode,
					      inode_set_ctime_current(inode));
		}
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_drew_write_lock(&root->snapshot_lock);
		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
		if (ret) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, BTRFS_I(inode));
		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_end_transaction(trans);
	} else {
		struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);

		if (btrfs_is_zoned(fs_info)) {
			ret = btrfs_wait_ordered_range(BTRFS_I(inode),
					ALIGN(newsize, fs_info->sectorsize),
					(u64)-1);
			if (ret)
				return ret;
		}

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure any new writes to the file get on disk
		 * on close.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		truncate_setsize(inode, newsize);

		inode_dio_wait(inode);

		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * Truncate failed, so fix up the in-memory size. We
			 * adjusted disk_i_size down as we removed extents, so
			 * wait for disk_i_size to be stable and then update the
			 * in-memory size to match.
			 */
			err = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}

static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(idmap, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(BTRFS_I(inode));

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
	}

	return err;
}

/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge amount of calls to lock_extent() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but have not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to a
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;
		unsigned state_flags;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		state_flags = state->state;
		spin_unlock(&io_tree->lock);

		lock_extent(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
		 * and its reserved space won't be freed by delayed_ref.
		 * So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidate_folio, case 2)
		 *
		 * Note, end is the bytenr of last byte, so we need + 1 here.
		 */
		if (state_flags & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
					       end - start + 1, NULL);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
				 &cached_state);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}

static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
							struct btrfs_block_rsv *rsv)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	int ret;

	/*
	 * Eviction should be taking place at some place safe because of our
	 * delayed iputs. However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above. We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
	 * if we fail to make this reservation we can re-try without the
	 * delayed_refs_extra so we can make some forward progress.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
				     BTRFS_RESERVE_FLUSH_EVICT);
	if (ret) {
		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
					     BTRFS_RESERVE_FLUSH_EVICT);
		if (ret) {
			btrfs_warn(fs_info,
				   "could not allocate space for delete; will truncate on mount");
			return ERR_PTR(-ENOSPC);
		}
		delayed_refs_extra = 0;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return trans;

	if (delayed_refs_extra) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		trans->bytes_reserved = delayed_refs_extra;
		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
					delayed_refs_extra, true);
	}
	return trans;
}
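
/*
 * Rough shape of the math (illustrative): rsv->size was set by the caller
 * to btrfs_calc_metadata_size(fs_info, 1), and we first over-ask by one
 * delayed-ref unit (btrfs_calc_delayed_ref_bytes(fs_info, 1)). If even the
 * smaller retry fails there is genuinely no space, so the caller falls
 * back to the orphan item and the truncation finishes on the next mount.
 */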

void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv = NULL;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root) {
		fsverity_cleanup_inode(inode);
		clear_inode(inode);
		return;
	}

	fs_info = inode_to_fs_info(inode);
	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto out;

	if (is_bad_inode(inode))
		goto out;

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		goto out;

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
		goto out;
	}

	/*
	 * This makes sure the inode item in tree is uptodate and the space for
	 * the inode update is released.
	 */
	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret)
		goto out;

	/*
	 * This drops any pending insert or delete operations we have for this
	 * inode. We could have a delayed dir index deletion queued up, but
	 * we're removing the inode completely so that'll be taken care of in
	 * the truncate.
	 */
	btrfs_kill_delayed_inode_items(BTRFS_I(inode));

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		goto out;
	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
	rsv->failfast = true;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	while (1) {
		struct btrfs_truncate_control control = {
			.inode = BTRFS_I(inode),
			.ino = btrfs_ino(BTRFS_I(inode)),
			.new_size = 0,
			.min_type = 0,
		};

		trans = evict_refill_and_join(root, rsv);
		if (IS_ERR(trans))
			goto out;

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, &control);
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		/*
		 * We have not added new delayed items for our inode after we
		 * have flushed its delayed items, so no need to throttle on
		 * delayed items. However we have modified extent buffers.
		 */
		btrfs_btree_balance_dirty_nodelay(fs_info);
		if (ret && ret != -ENOSPC && ret != -EAGAIN)
			goto out;
		else if (!ret)
			break;
	}

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items in
	 * the tree. They will be cleaned up on the next mount. If the inode
	 * number gets reused, cleanup deletes the orphan item without doing
	 * anything, and unlink reuses the existing orphan item.
	 *
	 * If it turns out that we are dropping too many of these, we might want
	 * to add a mechanism for retrying these after a commit.
	 */
	trans = evict_refill_and_join(root, rsv);
	if (!IS_ERR(trans)) {
		trans->block_rsv = rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * If we didn't successfully delete, the orphan item will still be in
	 * the tree and we'll retry on the next mount. Again, we might also want
	 * to retry these periodically in the future.
	 */
	btrfs_remove_delayed_node(BTRFS_I(inode));
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If found a corrupted location in dir entry, returns -EUCLEAN.
 */
static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
			       struct btrfs_key *location, u8 *type)
{
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = dir->root;
	int ret = 0;
	struct fscrypt_name fname;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret < 0)
		goto out;
	/*
	 * fscrypt_setup_filename() should never return a positive value, but
	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
	 */
	ASSERT(ret == 0);

	/* This needs to handle no-key deletions later on */

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
				   &fname.disk_name, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	if (location->type != BTRFS_INODE_ITEM_KEY &&
	    location->type != BTRFS_ROOT_ITEM_KEY) {
		ret = -EUCLEAN;
		btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
			   __func__, fname.disk_name.name, btrfs_ino(dir),
			   location->objectid, location->type, location->offset);
	}
	if (!ret)
		*type = btrfs_dir_ftype(path->nodes[0], di);
out:
	fscrypt_free_filename(&fname);
	btrfs_free_path(path);
	return ret;
}

/*
 * When we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root. This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
	if (ret)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = btrfs_root_id(dir->root);
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
				   (unsigned long)(ref + 1), fname.disk_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return err;
}

static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *entry;
	bool empty = false;

	xa_lock(&root->inodes);
	entry = __xa_erase(&root->inodes, btrfs_ino(inode));
	if (entry == inode)
		empty = xa_empty(&root->inodes);
	xa_unlock(&root->inodes);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		xa_lock(&root->inodes);
		empty = xa_empty(&root->inodes);
		xa_unlock(&root->inodes);
		if (empty)
			btrfs_add_dead_root(root);
	}
}

static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;

	btrfs_set_inode_number(BTRFS_I(inode), args->ino);
	BTRFS_I(inode)->root = btrfs_grab_root(args->root);

	if (args->root && args->root == args->root->fs_info->tree_root &&
	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
			&BTRFS_I(inode)->runtime_flags);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	return args->ino == btrfs_ino(BTRFS_I(inode)) &&
	       args->root == BTRFS_I(inode)->root;
}

static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(ino, root);

	args.ino = ino;
	args.root = root;

	inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
				 btrfs_init_locked_inode,
				 (void *)&args);
	return inode;
}
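
/*
 * Note that (ino, root) is the identity of a btrfs inode: the same inode
 * number can exist in many subvolume roots. btrfs_inode_hash() folds both
 * into the hash used by iget5_locked_rcu(), and btrfs_find_actor() filters
 * out hash collisions that match only one of the two.
 */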

/*
 * Get an inode object given its inode number and corresponding root. Path is
 * preallocated to prevent recursing back to iget through allocator.
 */
struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
			      struct btrfs_path *path)
{
	struct inode *inode;
	int ret;

	inode = btrfs_iget_locked(ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	ret = btrfs_read_locked_inode(inode, path);
	if (ret)
		return ERR_PTR(ret);

	unlock_new_inode(inode);
	return inode;
}

/*
 * Get an inode object given its inode number and corresponding root.
 */
struct inode *btrfs_iget(u64 ino, struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_path *path;
	int ret;

	inode = btrfs_iget_locked(ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_read_locked_inode(inode, path);
	btrfs_free_path(path);
	if (ret)
		return ERR_PTR(ret);

	unlock_new_inode(inode);
	return inode;
}

static struct inode *new_simple_dir(struct inode *dir,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct timespec64 ts;
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = btrfs_grab_root(root);
	BTRFS_I(inode)->ref_root_id = key->objectid;
	set_bit(BTRFS_INODE_ROOT_STUB, &BTRFS_I(inode)->runtime_flags);
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	btrfs_set_inode_number(BTRFS_I(inode), BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
	/*
	 * We only need lookup, the rest is read-only and there's no inode
	 * associated with the dentry.
	 */
	inode->i_op = &simple_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;

	ts = inode_set_ctime_current(inode);
	inode_set_mtime_to_ts(inode, ts);
	inode_set_atime_to_ts(inode, inode_get_atime(dir));
	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;

	inode->i_uid = dir->i_uid;
	inode->i_gid = dir->i_gid;

	return inode;
}

static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
static_assert(BTRFS_FT_DIR == FT_DIR);
static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return fs_umode_to_ftype(inode->i_mode);
}
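
/*
 * The static_asserts above are what make this one-liner safe: the on-disk
 * BTRFS_FT_* values line up with the VFS FT_* values, so for example a
 * mode of S_IFDIR maps straight to BTRFS_FT_DIR with no translation table.
 */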

struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location = { 0 };
	u8 di_type = 0;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(location.objectid, root);
		if (IS_ERR(inode))
			return inode;

		/* Do extra check against inode mode with di_type */
		if (btrfs_inode_type(inode) != di_type) {
			btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
				   inode->i_mode, btrfs_inode_type(inode),
				   di_type);
			iput(inode);
			return ERR_PTR(-EUCLEAN);
		}
		return inode;
	}

	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir, &location, root);
	} else {
		inode = btrfs_iget(location.objectid, sub_root);
		btrfs_put_root(sub_root);

		if (IS_ERR(inode))
			return inode;

		down_read(&fs_info->cleanup_work_sem);
		if (!sb_rdonly(inode->i_sb))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}

static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);

	if (inode == ERR_PTR(-ENOENT))
		inode = NULL;
	return d_splice_alias(inode, dentry);
}

/*
 * Find the highest existing sequence number in a directory and then set the
 * in-memory index_cnt variable to the first free sequence number.
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	if (path->slots[0] == 0) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	inode->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}
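
/*
 * Example (illustrative): if the highest DIR_INDEX key found for this
 * directory has offset 57, index_cnt becomes 58 and the next entry created
 * in it gets dir index 58. An empty directory starts over at
 * BTRFS_DIR_START_INDEX.
 */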

static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	btrfs_inode_lock(dir, 0);
	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				goto out;
		}
	}

	/* index_cnt is the index number of next new entry, so decrement it. */
	*index = dir->index_cnt - 1;
out:
	btrfs_inode_unlock(dir, 0);

	return ret;
}

/*
 * All this infrastructure exists because dir_emit can fault, and we are holding
 * the tree lock when doing readdir. For now just allocate a buffer and copy
 * our information into that, and then dir_emit from the buffer. This is
 * similar to what NFS does, only we don't keep the buffer around in pagecache
 * because I'm afraid I'll mess that up. Long term we need to make filldir do
 * copy_to_user_inatomic so we don't have to worry about page faulting under the
 * tree lock.
 */
static int btrfs_opendir(struct inode *inode, struct file *file)
{
	struct btrfs_file_private *private;
	u64 last_index;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
	if (ret)
		return ret;

	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	private->last_index = last_index;
	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!private->filldir_buf) {
		kfree(private);
		return -ENOMEM;
	}
	file->private_data = private;
	return 0;
}

static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct btrfs_file_private *private = file->private_data;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
				       &private->last_index);
	if (ret)
		return ret;

	return generic_file_llseek(file, offset, whence);
}

struct dir_entry {
	u64 ino;
	u64 offset;
	unsigned type;
	int name_len;
};

static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
	while (entries--) {
		struct dir_entry *entry = addr;
		char *name = (char *)(entry + 1);

		ctx->pos = get_unaligned(&entry->offset);
		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
			      get_unaligned(&entry->ino),
			      get_unaligned(&entry->type)))
			return 1;
		addr += sizeof(struct dir_entry) +
			get_unaligned(&entry->name_len);
		ctx->pos++;
	}
	return 0;
}
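
/*
 * Layout of the filldir_buf consumed above (sketch):
 *
 *   [struct dir_entry][name bytes][struct dir_entry][name bytes]...
 *
 * Entries are packed back to back with no alignment padding, which is why
 * every field access goes through get_unaligned()/put_unaligned().
 */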

static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	void *addr;
	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);
	int ret;
	char *name_ptr;
	int name_len;
	int entries = 0;
	int total_len = 0;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	addr = private->filldir_buf;
	path->reada = READA_FORWARD;

	put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
					      &ins_list, &del_list);

again:
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
		struct dir_entry *entry;
		struct extent_buffer *leaf = path->nodes[0];
		u8 ftype;

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			continue;
		if (found_key.offset > private->last_index)
			break;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			continue;
		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
		name_len = btrfs_dir_name_len(leaf, di);
		if ((total_len + sizeof(struct dir_entry) + name_len) >=
		    PAGE_SIZE) {
			btrfs_release_path(path);
			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
			if (ret)
				goto nopos;
			addr = private->filldir_buf;
			entries = 0;
			total_len = 0;
			goto again;
		}

		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
		entry = addr;
		name_ptr = (char *)(entry + 1);
		read_extent_buffer(leaf, name_ptr,
				   (unsigned long)(di + 1), name_len);
		put_unaligned(name_len, &entry->name_len);
		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
		btrfs_dir_item_key_to_cpu(leaf, di, &location);
		put_unaligned(location.objectid, &entry->ino);
		put_unaligned(found_key.offset, &entry->offset);
		entries++;
		addr += sizeof(struct dir_entry) + name_len;
		total_len += sizeof(struct dir_entry) + name_len;
	}
	/* Catch error encountered during iteration */
	if (ret < 0)
		goto err;

	btrfs_release_path(path);

	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
	if (ret)
		goto nopos;

	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
	if (ret)
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset. This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir. Until we reuse freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes. But it is most likely to find the inode in cache.
 * FIXME: needs more benchmarking... there are no reasons other than
 * performance to keep or drop this code.
 */
static int btrfs_dirty_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, inode);
	if (ret == -ENOSPC || ret == -EDQUOT) {
		/* whoops, let's try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, inode);
	}
	btrfs_end_transaction(trans);
	if (inode->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}

/*
 * This is a copy of file_update_time. We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	bool dirty;

	if (btrfs_root_readonly(root))
		return -EROFS;

	dirty = inode_update_timestamps(inode, flags);
	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
}

/*
 * Helper to find a free sequence number in a given directory. This current
 * code is very simple, later versions will do smarter things in the btree.
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}

static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;

	args.ino = btrfs_ino(BTRFS_I(inode));
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
			btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
			btrfs_find_actor, &args);
}

int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	int ret;

	if (!args->orphan) {
		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
					     &args->fname);
		if (ret)
			return ret;
	}

	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
	if (ret) {
		fscrypt_free_filename(&args->fname);
		return ret;
	}

	/* 1 to add inode item */
	*trans_num_items = 1;
	/* 1 to add compression property */
	if (BTRFS_I(dir)->prop_compress)
		(*trans_num_items)++;
	/* 1 to add default ACL xattr */
	if (args->default_acl)
		(*trans_num_items)++;
	/* 1 to add access ACL xattr */
	if (args->acl)
		(*trans_num_items)++;
#ifdef CONFIG_SECURITY
	/* 1 to add LSM xattr */
	if (dir->i_security)
		(*trans_num_items)++;
#endif
	if (args->orphan) {
		/* 1 to add orphan item */
		(*trans_num_items)++;
	} else {
		/*
		 * 1 to add dir item
		 * 1 to add dir index
		 * 1 to update parent inode item
		 *
		 * No need for 1 unit for the inode ref item because it is
		 * inserted in a batch together with the inode item at
		 * btrfs_create_new_inode().
		 */
		*trans_num_items += 3;
	}
	return 0;
}
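
/*
 * Worked count (illustrative): a plain creat() in a directory with no
 * compression property, where both a default and an access ACL apply and
 * an LSM xattr is added, reserves 1 + 1 + 1 + 1 + 3 = 7 items.
 */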

void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
{
	posix_acl_release(args->acl);
	posix_acl_release(args->default_acl);
	fscrypt_free_filename(&args->fname);
}

/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
{
	unsigned int flags;

	flags = dir->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		inode->flags &= ~BTRFS_INODE_COMPRESS;
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
		inode->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		inode->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->vfs_inode.i_mode))
			inode->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
}
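
/*
 * Example (illustrative): a regular file created in a directory that was
 * chattr'd +C (NODATACOW) inherits both NODATACOW and NODATASUM, since
 * nodatacow data carries no checksums; a subdirectory created there
 * inherits only NODATACOW.
 */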

int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args)
{
	struct timespec64 ts;
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct btrfs_root *root;
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	u64 objectid;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	struct btrfs_item_batch batch;
	unsigned long ptr;
	int ret;
	bool xa_reserved = false;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!args->subvol)
		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
	root = BTRFS_I(inode)->root;

	ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
	if (ret)
		goto out;

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		goto out;
	btrfs_set_inode_number(BTRFS_I(inode), objectid);

	ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
	if (ret)
		goto out;
	xa_reserved = true;

	if (args->orphan) {
		/*
		 * O_TMPFILE, set link count to 0, so that after this point, we
		 * fill in an inode item with the correct link count.
		 */
		set_nlink(inode, 0);
	} else {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
		if (ret)
			goto out;
	}

	if (S_ISDIR(inode->i_mode))
		BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;

	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * We don't have any capability xattrs set here yet, shortcut any
	 * queries for the xattrs here. If we add them later via the inode
	 * security init path or any other path this flag will be cleared.
	 */
	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);

	/*
	 * Subvolumes don't inherit flags from their parent directory.
	 * Originally this was probably by accident, but we probably can't
	 * change it now without compatibility issues.
	 */
	if (!args->subvol)
		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));

	if (S_ISREG(inode->i_mode)) {
		if (btrfs_test_opt(fs_info, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(fs_info, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	ret = btrfs_insert_inode_locked(inode);
	if (ret < 0) {
		if (!args->orphan)
			BTRFS_I(dir)->index_cnt--;
		goto out;
	}

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	btrfs_set_inode_full_sync(BTRFS_I(inode));

	key[0].objectid = objectid;
	key[0].type = BTRFS_INODE_ITEM_KEY;
	key[0].offset = 0;

	sizes[0] = sizeof(struct btrfs_inode_item);

	if (!args->orphan) {
		/*
		 * Start new inodes with an inode_ref. This is slightly more
		 * efficient for small numbers of hard links since they will
		 * be packed into one item. Extended refs will kick in if we
		 * add more hard links than can fit in the ref item.
		 */
		key[1].objectid = objectid;
		key[1].type = BTRFS_INODE_REF_KEY;
		if (args->subvol) {
			key[1].offset = objectid;
			sizes[1] = 2 + sizeof(*ref);
		} else {
			key[1].offset = btrfs_ino(BTRFS_I(dir));
			sizes[1] = name->len + sizeof(*ref);
		}
	}

	batch.keys = &key[0];
	batch.data_sizes = &sizes[0];
	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
	batch.nr = args->orphan ? 1 : 2;
	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret != 0) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	ts = simple_inode_init_ts(inode);
	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;

	/*
	 * We're going to fill the inode item now, so at this point the inode
	 * must be fully initialized.
	 */

	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
			      sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	if (!args->orphan) {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
				     struct btrfs_inode_ref);
		ptr = (unsigned long)(ref + 1);
		if (args->subvol) {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
			write_extent_buffer(path->nodes[0], "..", ptr, 2);
		} else {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
						     name->len);
			btrfs_set_inode_ref_index(path->nodes[0], ref,
						  BTRFS_I(inode)->dir_index);
			write_extent_buffer(path->nodes[0], name->name, ptr,
					    name->len);
		}
	}

	/*
	 * We don't need the path anymore, plus inheriting properties, adding
	 * ACLs, security xattrs, orphan item or adding the link, will result in
	 * allocating yet another path. So just free our path.
	 */
	btrfs_free_path(path);
	path = NULL;

	if (args->subvol) {
		struct inode *parent;

		/*
		 * Subvolumes inherit properties from their parent subvolume,
		 * not the directory they were created in.
		 */
		parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
		if (IS_ERR(parent)) {
			ret = PTR_ERR(parent);
		} else {
			ret = btrfs_inode_inherit_props(trans, inode, parent);
			iput(parent);
		}
	} else {
		ret = btrfs_inode_inherit_props(trans, inode, dir);
	}
	if (ret) {
		btrfs_err(fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
	}

	/*
	 * Subvolumes don't inherit ACLs or get passed to the LSM. This is
	 * probably a bug.
	 */
	if (!args->subvol) {
		ret = btrfs_init_inode_security(trans, args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto discard;
		}
	}

	ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
	if (WARN_ON(ret)) {
		/* Shouldn't happen, we used xa_reserve() before. */
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));

	btrfs_update_root_times(trans, root);

	if (args->orphan) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	} else {
		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
				     0, BTRFS_I(inode)->dir_index);
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	return 0;

discard:
	/*
	 * discard_new_inode() calls iput(), but the caller owns the reference
	 * to the inode.
	 */
	ihold(inode);
	discard_new_inode(inode);
out:
	if (xa_reserved)
		xa_release(&root->inodes, objectid);

	btrfs_free_path(path);
	return ret;
}

/*
 * Utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * If 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 btrfs_root_id(root), parent_ino,
					 index, name);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name,
					     ino, parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name->len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
		inode_set_mtime_to_ts(&parent_inode->vfs_inode,
				      inode_set_ctime_current(&parent_inode->vfs_inode));

	ret = btrfs_update_inode(trans, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 btrfs_root_id(root), parent_ino,
					 &local_index, name);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
					  &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}
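
/*
 * On the i_size bump above: a btrfs directory's i_size is the sum of the
 * lengths of its entry names, and every link stores the name twice (once
 * in the DIR_ITEM and once in the DIR_INDEX item), hence name->len * 2.
 * Linking "log.txt", for example, grows the parent's i_size by 14.
 */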
6595
btrfs_create_common(struct inode * dir,struct dentry * dentry,struct inode * inode)6596 static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
6597 struct inode *inode)
6598 {
6599 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
6600 struct btrfs_root *root = BTRFS_I(dir)->root;
6601 struct btrfs_new_inode_args new_inode_args = {
6602 .dir = dir,
6603 .dentry = dentry,
6604 .inode = inode,
6605 };
6606 unsigned int trans_num_items;
6607 struct btrfs_trans_handle *trans;
6608 int err;
6609
6610 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
6611 if (err)
6612 goto out_inode;
6613
6614 trans = btrfs_start_transaction(root, trans_num_items);
6615 if (IS_ERR(trans)) {
6616 err = PTR_ERR(trans);
6617 goto out_new_inode_args;
6618 }
6619
6620 err = btrfs_create_new_inode(trans, &new_inode_args);
6621 if (!err)
6622 d_instantiate_new(dentry, inode);
6623
6624 btrfs_end_transaction(trans);
6625 btrfs_btree_balance_dirty(fs_info);
6626 out_new_inode_args:
6627 btrfs_new_inode_args_destroy(&new_inode_args);
6628 out_inode:
6629 if (err)
6630 iput(inode);
6631 return err;
6632 }
6633
btrfs_mknod(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode,dev_t rdev)6634 static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
6635 struct dentry *dentry, umode_t mode, dev_t rdev)
6636 {
6637 struct inode *inode;
6638
6639 inode = new_inode(dir->i_sb);
6640 if (!inode)
6641 return -ENOMEM;
6642 inode_init_owner(idmap, inode, dir, mode);
6643 inode->i_op = &btrfs_special_inode_operations;
6644 init_special_inode(inode, inode->i_mode, rdev);
6645 return btrfs_create_common(dir, dentry, inode);
6646 }
6647
btrfs_create(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode,bool excl)6648 static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
6649 struct dentry *dentry, umode_t mode, bool excl)
6650 {
6651 struct inode *inode;
6652
6653 inode = new_inode(dir->i_sb);
6654 if (!inode)
6655 return -ENOMEM;
6656 inode_init_owner(idmap, inode, dir, mode);
6657 inode->i_fop = &btrfs_file_operations;
6658 inode->i_op = &btrfs_file_inode_operations;
6659 inode->i_mapping->a_ops = &btrfs_aops;
6660 return btrfs_create_common(dir, dentry, inode);
6661 }
6662
btrfs_link(struct dentry * old_dentry,struct inode * dir,struct dentry * dentry)6663 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6664 struct dentry *dentry)
6665 {
6666 struct btrfs_trans_handle *trans = NULL;
6667 struct btrfs_root *root = BTRFS_I(dir)->root;
6668 struct inode *inode = d_inode(old_dentry);
6669 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
6670 struct fscrypt_name fname;
6671 u64 index;
6672 int err;
6673 int drop_inode = 0;
6674
6675 /* do not allow sys_link's with other subvols of the same device */
6676 if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
6677 return -EXDEV;
6678
6679 if (inode->i_nlink >= BTRFS_LINK_MAX)
6680 return -EMLINK;
6681
6682 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
6683 if (err)
6684 goto fail;
6685
6686 err = btrfs_set_inode_index(BTRFS_I(dir), &index);
6687 if (err)
6688 goto fail;
6689
6690 /*
6691 * 2 items for inode and inode ref
6692 * 2 items for dir items
6693 * 1 item for parent inode
6694 * 1 item for orphan item deletion if O_TMPFILE
6695 */
6696 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
6697 if (IS_ERR(trans)) {
6698 err = PTR_ERR(trans);
6699 trans = NULL;
6700 goto fail;
6701 }
6702
6703 /* There are several dir indexes for this inode, clear the cache. */
6704 BTRFS_I(inode)->dir_index = 0ULL;
6705 inc_nlink(inode);
6706 inode_inc_iversion(inode);
6707 inode_set_ctime_current(inode);
6708 ihold(inode);
6709 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6710
6711 err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
6712 &fname.disk_name, 1, index);
6713
6714 if (err) {
6715 drop_inode = 1;
6716 } else {
6717 struct dentry *parent = dentry->d_parent;
6718
6719 err = btrfs_update_inode(trans, BTRFS_I(inode));
6720 if (err)
6721 goto fail;
6722 if (inode->i_nlink == 1) {
6723 /*
6724 * If new hard link count is 1, it's a file created
6725 * with open(2) O_TMPFILE flag.
6726 */
6727 err = btrfs_orphan_del(trans, BTRFS_I(inode));
6728 if (err)
6729 goto fail;
6730 }
6731 d_instantiate(dentry, inode);
6732 btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
6733 }
6734
6735 fail:
6736 fscrypt_free_filename(&fname);
6737 if (trans)
6738 btrfs_end_transaction(trans);
6739 if (drop_inode) {
6740 inode_dec_link_count(inode);
6741 iput(inode);
6742 }
6743 btrfs_btree_balance_dirty(fs_info);
6744 return err;
6745 }
6746
btrfs_mkdir(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,umode_t mode)6747 static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
6748 struct dentry *dentry, umode_t mode)
6749 {
6750 struct inode *inode;
6751
6752 inode = new_inode(dir->i_sb);
6753 if (!inode)
6754 return -ENOMEM;
6755 inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
6756 inode->i_op = &btrfs_dir_inode_operations;
6757 inode->i_fop = &btrfs_dir_file_operations;
6758 return btrfs_create_common(dir, dentry, inode);
6759 }
6760
6761 static noinline int uncompress_inline(struct btrfs_path *path,
6762 struct folio *folio,
6763 struct btrfs_file_extent_item *item)
6764 {
6765 int ret;
6766 struct extent_buffer *leaf = path->nodes[0];
6767 char *tmp;
6768 size_t max_size;
6769 unsigned long inline_size;
6770 unsigned long ptr;
6771 int compress_type;
6772
6773 compress_type = btrfs_file_extent_compression(leaf, item);
6774 max_size = btrfs_file_extent_ram_bytes(leaf, item);
6775 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
6776 tmp = kmalloc(inline_size, GFP_NOFS);
6777 if (!tmp)
6778 return -ENOMEM;
6779 ptr = btrfs_file_extent_inline_start(item);
6780
6781 read_extent_buffer(leaf, tmp, ptr, inline_size);
6782
6783 max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6784 ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
6785 max_size);
6786
6787 /*
6788 * decompression code contains a memset to fill in any space between the end
6789 * of the uncompressed data and the end of max_size in case the decompressed
6790 * data ends up shorter than ram_bytes. That doesn't cover the hole between
6791 * the end of an inline extent and the beginning of the next block, so we
6792 * cover that region here.
6793 */
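/*
 * Worked example with hypothetical numbers: on a 4K page, an inline
 * extent with ram_bytes == 100 gives max_size == 100, so the
 * folio_zero_range() below clears bytes [100, 4096) of the folio.
 */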
6794
6795 if (max_size < PAGE_SIZE)
6796 folio_zero_range(folio, max_size, PAGE_SIZE - max_size);
6797 kfree(tmp);
6798 return ret;
6799 }
6800
6801 static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
6802 {
6803 struct btrfs_file_extent_item *fi;
6804 void *kaddr;
6805 size_t copy_size;
6806
6807 if (!folio || folio_test_uptodate(folio))
6808 return 0;
6809
6810 ASSERT(folio_pos(folio) == 0);
6811
6812 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
6813 struct btrfs_file_extent_item);
6814 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
6815 return uncompress_inline(path, folio, fi);
6816
6817 copy_size = min_t(u64, PAGE_SIZE,
6818 btrfs_file_extent_ram_bytes(path->nodes[0], fi));
6819 kaddr = kmap_local_folio(folio, 0);
6820 read_extent_buffer(path->nodes[0], kaddr,
6821 btrfs_file_extent_inline_start(fi), copy_size);
6822 kunmap_local(kaddr);
6823 if (copy_size < PAGE_SIZE)
6824 folio_zero_range(folio, copy_size, PAGE_SIZE - copy_size);
6825 return 0;
6826 }
6827
6828 /*
6829 * Lookup the first extent overlapping a range in a file.
6830 *
6831 * @inode: file to search in
6832 * @folio: folio to read extent data into if the extent is inline
6833 * @start: file offset
6834 * @len: length of range starting at @start
6835 *
6836 * Return the first &struct extent_map which overlaps the given range, reading
6837 * it from the B-tree and caching it if necessary. Note that there may be more
6838 * extents which overlap the given range after the returned extent_map.
6839 *
6840 * If @folio is not NULL and the extent is inline, this also reads the extent
6841 * data directly into the folio and marks the extent up to date in the io_tree.
6842 *
6843 * Return: ERR_PTR on error, non-NULL extent_map on success.
6844 */
6845 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6846 struct folio *folio, u64 start, u64 len)
6847 {
6848 struct btrfs_fs_info *fs_info = inode->root->fs_info;
6849 int ret = 0;
6850 u64 extent_start = 0;
6851 u64 extent_end = 0;
6852 u64 objectid = btrfs_ino(inode);
6853 int extent_type = -1;
6854 struct btrfs_path *path = NULL;
6855 struct btrfs_root *root = inode->root;
6856 struct btrfs_file_extent_item *item;
6857 struct extent_buffer *leaf;
6858 struct btrfs_key found_key;
6859 struct extent_map *em = NULL;
6860 struct extent_map_tree *em_tree = &inode->extent_tree;
6861
6862 read_lock(&em_tree->lock);
6863 em = lookup_extent_mapping(em_tree, start, len);
6864 read_unlock(&em_tree->lock);
6865
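/*
 * A cached extent map is only reusable if it covers @start. A cached
 * inline extent is also not reusable when the caller passed a folio,
 * since the inline data then needs to be read into that folio.
 */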
6866 if (em) {
6867 if (em->start > start || em->start + em->len <= start)
6868 free_extent_map(em);
6869 else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
6870 free_extent_map(em);
6871 else
6872 goto out;
6873 }
6874 em = alloc_extent_map();
6875 if (!em) {
6876 ret = -ENOMEM;
6877 goto out;
6878 }
6879 em->start = EXTENT_MAP_HOLE;
6880 em->disk_bytenr = EXTENT_MAP_HOLE;
6881 em->len = (u64)-1;
6882
6883 path = btrfs_alloc_path();
6884 if (!path) {
6885 ret = -ENOMEM;
6886 goto out;
6887 }
6888
6889 /* Chances are we'll be called again, so go ahead and do readahead */
6890 path->reada = READA_FORWARD;
6891
6892 /*
6893 * The same explanation in load_free_space_cache applies here as well,
6894 * we only read when we're loading the free space cache, and at that
6895 * point the commit_root has everything we need.
6896 */
6897 if (btrfs_is_free_space_inode(inode)) {
6898 path->search_commit_root = 1;
6899 path->skip_locking = 1;
6900 }
6901
6902 ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
6903 if (ret < 0) {
6904 goto out;
6905 } else if (ret > 0) {
6906 if (path->slots[0] == 0)
6907 goto not_found;
6908 path->slots[0]--;
6909 ret = 0;
6910 }
6911
6912 leaf = path->nodes[0];
6913 item = btrfs_item_ptr(leaf, path->slots[0],
6914 struct btrfs_file_extent_item);
6915 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6916 if (found_key.objectid != objectid ||
6917 found_key.type != BTRFS_EXTENT_DATA_KEY) {
6918 /*
6919 * If we back up past the first extent we want to move forward
6920 * and see if there is an extent in front of us, otherwise we'll
6921 * say there is a hole for our whole search range which can
6922 * cause problems.
6923 */
6924 extent_end = start;
6925 goto next;
6926 }
6927
6928 extent_type = btrfs_file_extent_type(leaf, item);
6929 extent_start = found_key.offset;
6930 extent_end = btrfs_file_extent_end(path);
6931 if (extent_type == BTRFS_FILE_EXTENT_REG ||
6932 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6933 /* Only a regular file can have regular/prealloc extents. */
6934 if (!S_ISREG(inode->vfs_inode.i_mode)) {
6935 ret = -EUCLEAN;
6936 btrfs_crit(fs_info,
6937 "regular/prealloc extent found for non-regular inode %llu",
6938 btrfs_ino(inode));
6939 goto out;
6940 }
6941 trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
6942 extent_start);
6943 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6944 trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
6945 path->slots[0],
6946 extent_start);
6947 }
6948 next:
6949 if (start >= extent_end) {
6950 path->slots[0]++;
6951 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6952 ret = btrfs_next_leaf(root, path);
6953 if (ret < 0)
6954 goto out;
6955 else if (ret > 0)
6956 goto not_found;
6957
6958 leaf = path->nodes[0];
6959 }
6960 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6961 if (found_key.objectid != objectid ||
6962 found_key.type != BTRFS_EXTENT_DATA_KEY)
6963 goto not_found;
6964 if (start + len <= found_key.offset)
6965 goto not_found;
6966 if (start > found_key.offset)
6967 goto next;
6968
6969 /* New extent overlaps with existing one */
6970 em->start = start;
6971 em->len = found_key.offset - start;
6972 em->disk_bytenr = EXTENT_MAP_HOLE;
6973 goto insert;
6974 }
6975
6976 btrfs_extent_item_to_extent_map(inode, path, item, em);
6977
6978 if (extent_type == BTRFS_FILE_EXTENT_REG ||
6979 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
6980 goto insert;
6981 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
6982 /*
6983 * Inline extent can only exist at file offset 0. This is
6984 * ensured by tree-checker and inline extent creation path.
6985 * Thus all members representing file offsets should be zero.
6986 */
6987 ASSERT(extent_start == 0);
6988 ASSERT(em->start == 0);
6989
6990 /*
6991 * btrfs_extent_item_to_extent_map() should have properly
6992 * initialized em members already.
6993 *
6994 * Other members are not utilized for inline extents.
6995 */
6996 ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
6997 ASSERT(em->len == fs_info->sectorsize);
6998
6999 ret = read_inline_extent(path, folio);
7000 if (ret < 0)
7001 goto out;
7002 goto insert;
7003 }
7004 not_found:
7005 em->start = start;
7006 em->len = len;
7007 em->disk_bytenr = EXTENT_MAP_HOLE;
7008 insert:
7009 ret = 0;
7010 btrfs_release_path(path);
7011 if (em->start > start || extent_map_end(em) <= start) {
7012 btrfs_err(fs_info,
7013 "bad extent! em: [%llu %llu] passed [%llu %llu]",
7014 em->start, em->len, start, len);
7015 ret = -EIO;
7016 goto out;
7017 }
7018
7019 write_lock(&em_tree->lock);
7020 ret = btrfs_add_extent_mapping(inode, &em, start, len);
7021 write_unlock(&em_tree->lock);
7022 out:
7023 btrfs_free_path(path);
7024
7025 trace_btrfs_get_extent(root, inode, em);
7026
7027 if (ret) {
7028 free_extent_map(em);
7029 return ERR_PTR(ret);
7030 }
7031 return em;
7032 }
7033
7034 static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
7035 {
7036 struct btrfs_block_group *block_group;
7037 bool readonly = false;
7038
7039 block_group = btrfs_lookup_block_group(fs_info, bytenr);
7040 if (!block_group || block_group->ro)
7041 readonly = true;
7042 if (block_group)
7043 btrfs_put_block_group(block_group);
7044 return readonly;
7045 }
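/*
 * Note for can_nocow_extent() below: a missing block group is also
 * reported as read-only, so writes to such extents fall back to COW.
 */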
7046
7047 /*
7048 * Check if we can do nocow write into the range [@offset, @offset + @len)
7049 *
7050 * @offset: File offset
7051 * @len: The length to write, will be updated to the nocow writeable
7052 * range
7053 * @file_extent: (optional) Return the file extent info (disk bytenr, length,
7054 * offset, ram_bytes, ...) of the found extent if we can do a nocow write
7055 * @nowait: Whether the btree search must not block (sets path->nowait)
7056 *
7057 * Return:
7058 * >0 and update @len if we can do nocow write
7059 * 0 if we can't do nocow write
7060 * <0 if error happened
7061 *
7062 * NOTE: This only checks the file extents, caller is responsible to wait for
7063 * any ordered extents.
7064 */
7065 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7066 struct btrfs_file_extent *file_extent,
7067 bool nowait)
7068 {
7069 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
7070 struct can_nocow_file_extent_args nocow_args = { 0 };
7071 struct btrfs_path *path;
7072 int ret;
7073 struct extent_buffer *leaf;
7074 struct btrfs_root *root = BTRFS_I(inode)->root;
7075 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7076 struct btrfs_file_extent_item *fi;
7077 struct btrfs_key key;
7078 int found_type;
7079
7080 path = btrfs_alloc_path();
7081 if (!path)
7082 return -ENOMEM;
7083 path->nowait = nowait;
7084
7085 ret = btrfs_lookup_file_extent(NULL, root, path,
7086 btrfs_ino(BTRFS_I(inode)), offset, 0);
7087 if (ret < 0)
7088 goto out;
7089
7090 if (ret == 1) {
7091 if (path->slots[0] == 0) {
7092 /* can't find the item, must cow */
7093 ret = 0;
7094 goto out;
7095 }
7096 path->slots[0]--;
7097 }
7098 ret = 0;
7099 leaf = path->nodes[0];
7100 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7101 if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
7102 key.type != BTRFS_EXTENT_DATA_KEY) {
7103 /* not our file or wrong item type, must cow */
7104 goto out;
7105 }
7106
7107 if (key.offset > offset) {
7108 /* Wrong offset, must cow */
7109 goto out;
7110 }
7111
7112 if (btrfs_file_extent_end(path) <= offset)
7113 goto out;
7114
7115 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
7116 found_type = btrfs_file_extent_type(leaf, fi);
7117
7118 nocow_args.start = offset;
7119 nocow_args.end = offset + *len - 1;
7120 nocow_args.free_path = true;
7121
7122 ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
7123 /* can_nocow_file_extent() has freed the path. */
7124 path = NULL;
7125
7126 if (ret != 1) {
7127 /* Treat errors as not being able to NOCOW. */
7128 ret = 0;
7129 goto out;
7130 }
7131
7132 ret = 0;
7133 if (btrfs_extent_readonly(fs_info,
7134 nocow_args.file_extent.disk_bytenr +
7135 nocow_args.file_extent.offset))
7136 goto out;
7137
7138 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7139 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7140 u64 range_end;
7141
7142 range_end = round_up(offset + nocow_args.file_extent.num_bytes,
7143 root->fs_info->sectorsize) - 1;
7144 ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
7145 if (ret) {
7146 ret = -EAGAIN;
7147 goto out;
7148 }
7149 }
7150
7151 if (file_extent)
7152 memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));
7153
7154 *len = nocow_args.file_extent.num_bytes;
7155 ret = 1;
7156 out:
7157 btrfs_free_path(path);
7158 return ret;
7159 }
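/*
 * Minimal caller sketch (hypothetical, for illustration only):
 *
 *	u64 len = write_bytes;
 *	struct btrfs_file_extent fext;
 *	int ret;
 *
 *	ret = can_nocow_extent(inode, pos, &len, &fext, false);
 *	if (ret > 0)
 *		;	// NOCOW possible for the first @len bytes at @pos
 *	else
 *		;	// ret == 0: fall back to COW, ret < 0: error
 *
 * Real callers must also wait for or check ordered extents themselves,
 * as noted in the comment above the function.
 */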
7160
7161 /* The callers of this must take lock_extent() */
7162 struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
7163 const struct btrfs_file_extent *file_extent,
7164 int type)
7165 {
7166 struct extent_map *em;
7167 int ret;
7168
7169 /*
7170 * Note the missing NOCOW type.
7171 *
7172 * For pure NOCOW writes, we should not create an io extent map, but
7173 * just reuse the existing one.
7174 * Only PREALLOC writes (NOCOW write into preallocated range) can
7175 * create an io extent map.
7176 */
7177 ASSERT(type == BTRFS_ORDERED_PREALLOC ||
7178 type == BTRFS_ORDERED_COMPRESSED ||
7179 type == BTRFS_ORDERED_REGULAR);
7180
7181 switch (type) {
7182 case BTRFS_ORDERED_PREALLOC:
7183 /* We're only referring to part of a larger preallocated extent. */
7184 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7185 break;
7186 case BTRFS_ORDERED_REGULAR:
7187 /* COW results in a new extent matching our file extent size. */
7188 ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
7189 ASSERT(file_extent->ram_bytes == file_extent->num_bytes);
7190
7191 /* Since it's a new extent, we should not have any offset. */
7192 ASSERT(file_extent->offset == 0);
7193 break;
7194 case BTRFS_ORDERED_COMPRESSED:
7195 /* Must be compressed. */
7196 ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);
7197
7198 /*
7199 * An encoded write can make us refer to part of the
7200 * uncompressed extent.
7201 */
7202 ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
7203 break;
7204 }
7205
7206 em = alloc_extent_map();
7207 if (!em)
7208 return ERR_PTR(-ENOMEM);
7209
7210 em->start = start;
7211 em->len = file_extent->num_bytes;
7212 em->disk_bytenr = file_extent->disk_bytenr;
7213 em->disk_num_bytes = file_extent->disk_num_bytes;
7214 em->ram_bytes = file_extent->ram_bytes;
7215 em->generation = -1;
7216 em->offset = file_extent->offset;
7217 em->flags |= EXTENT_FLAG_PINNED;
7218 if (type == BTRFS_ORDERED_COMPRESSED)
7219 extent_map_set_compression(em, file_extent->compression);
7220
7221 ret = btrfs_replace_extent_map_range(inode, em, true);
7222 if (ret) {
7223 free_extent_map(em);
7224 return ERR_PTR(ret);
7225 }
7226
7227 /* The em now has 2 refs; the caller needs to call free_extent_map() once. */
7228 return em;
7229 }
7230
7231 /*
7232 * For release_folio() and invalidate_folio() we have a race window where
7233 * folio_end_writeback() is called but the subpage spinlock is not yet released.
7234 * If we continue to release/invalidate the page, we could cause use-after-free
7235 * for subpage spinlock. So this function is to spin and wait for subpage
7236 * spinlock.
7237 */
7238 static void wait_subpage_spinlock(struct folio *folio)
7239 {
7240 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
7241 struct btrfs_subpage *subpage;
7242
7243 if (!btrfs_is_subpage(fs_info, folio->mapping))
7244 return;
7245
7246 ASSERT(folio_test_private(folio) && folio_get_private(folio));
7247 subpage = folio_get_private(folio);
7248
7249 /*
7250 * This may look insane as we just acquire the spinlock and release it,
7251 * without doing anything. But we just want to make sure no one is
7252 * still holding the subpage spinlock.
7253 * And since the page is not dirty nor writeback, and we have page
7254 * locked, the only possible way to hold a spinlock is from the endio
7255 * function to clear page writeback.
7256 *
7257 * Here we just acquire the spinlock so that all existing callers
7258 * should exit and we're safe to release/invalidate the page.
7259 */
7260 spin_lock_irq(&subpage->lock);
7261 spin_unlock_irq(&subpage->lock);
7262 }
7263
7264 static int btrfs_launder_folio(struct folio *folio)
7265 {
7266 return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
7267 PAGE_SIZE, NULL);
7268 }
7269
7270 static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7271 {
7272 if (try_release_extent_mapping(folio, gfp_flags)) {
7273 wait_subpage_spinlock(folio);
7274 clear_folio_extent_mapped(folio);
7275 return true;
7276 }
7277 return false;
7278 }
7279
7280 static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
7281 {
7282 if (folio_test_writeback(folio) || folio_test_dirty(folio))
7283 return false;
7284 return __btrfs_release_folio(folio, gfp_flags);
7285 }
7286
7287 #ifdef CONFIG_MIGRATION
7288 static int btrfs_migrate_folio(struct address_space *mapping,
7289 struct folio *dst, struct folio *src,
7290 enum migrate_mode mode)
7291 {
7292 int ret = filemap_migrate_folio(mapping, dst, src, mode);
7293
7294 if (ret != MIGRATEPAGE_SUCCESS)
7295 return ret;
7296
7297 if (folio_test_ordered(src)) {
7298 folio_clear_ordered(src);
7299 folio_set_ordered(dst);
7300 }
7301
7302 return MIGRATEPAGE_SUCCESS;
7303 }
7304 #else
7305 #define btrfs_migrate_folio NULL
7306 #endif
7307
7308 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
7309 size_t length)
7310 {
7311 struct btrfs_inode *inode = folio_to_inode(folio);
7312 struct btrfs_fs_info *fs_info = inode->root->fs_info;
7313 struct extent_io_tree *tree = &inode->io_tree;
7314 struct extent_state *cached_state = NULL;
7315 u64 page_start = folio_pos(folio);
7316 u64 page_end = page_start + folio_size(folio) - 1;
7317 u64 cur;
7318 int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
7319
7320 /*
7321 * We have folio locked so no new ordered extent can be created on this
7322 * page, nor bio can be submitted for this folio.
7323 *
7324 * But an already submitted bio can still be finished on this folio.
7325 * Furthermore, the endio function won't skip a folio that has Ordered
7326 * already cleared, so it's possible for endio and
7327 * invalidate_folio to do the same ordered extent accounting twice
7328 * on one folio.
7329 *
7330 * So here we wait for any submitted bios to finish, so that we won't
7331 * do double ordered extent accounting on the same folio.
7332 */
7333 folio_wait_writeback(folio);
7334 wait_subpage_spinlock(folio);
7335
7336 /*
7337 * For the subpage case, we have call sites like
7338 * btrfs_punch_hole_lock_range() which pass a range not aligned to the
7339 * sectorsize.
7340 * If the range doesn't cover the full folio, we don't need to and
7341 * shouldn't clear the folio's extent mapped state, as folio->private
7342 * can still record subpage dirty bits for other parts of the range.
7343 *
7344 * For cases that invalidate the full folio even though the range
7345 * doesn't cover the full folio, like invalidating the last folio, we're
7346 * still safe to wait for the ordered extent to finish.
7347 */
7348 if (!(offset == 0 && length == folio_size(folio))) {
7349 btrfs_release_folio(folio, GFP_NOFS);
7350 return;
7351 }
7352
7353 if (!inode_evicting)
7354 lock_extent(tree, page_start, page_end, &cached_state);
7355
7356 cur = page_start;
7357 while (cur < page_end) {
7358 struct btrfs_ordered_extent *ordered;
7359 u64 range_end;
7360 u32 range_len;
7361 u32 extra_flags = 0;
7362
7363 ordered = btrfs_lookup_first_ordered_range(inode, cur,
7364 page_end + 1 - cur);
7365 if (!ordered) {
7366 range_end = page_end;
7367 /*
7368 * No ordered extent covering this range, we are safe
7369 * to delete all extent states in the range.
7370 */
7371 extra_flags = EXTENT_CLEAR_ALL_BITS;
7372 goto next;
7373 }
7374 if (ordered->file_offset > cur) {
7375 /*
7376 * There is a range between [cur, oe->file_offset) not
7377 * covered by any ordered extent.
7378 * We are safe to delete all extent states, and handle
7379 * the ordered extent in the next iteration.
7380 */
7381 range_end = ordered->file_offset - 1;
7382 extra_flags = EXTENT_CLEAR_ALL_BITS;
7383 goto next;
7384 }
7385
7386 range_end = min(ordered->file_offset + ordered->num_bytes - 1,
7387 page_end);
7388 ASSERT(range_end + 1 - cur < U32_MAX);
7389 range_len = range_end + 1 - cur;
7390 if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
7391 /*
7392 * If Ordered is cleared, it means endio has
7393 * already been executed for the range.
7394 * We can't delete the extent states as
7395 * btrfs_finish_ordered_io() may still use some of them.
7396 */
7397 goto next;
7398 }
7399 btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
7400
7401 /*
7402 * IO on this page will never be started, so we need to account
7403 * for any ordered extents now. Don't clear EXTENT_DELALLOC_NEW
7404 * here, must leave that up for the ordered extent completion.
7405 *
7406 * This will also unlock the range for incoming
7407 * btrfs_finish_ordered_io().
7408 */
7409 if (!inode_evicting)
7410 clear_extent_bit(tree, cur, range_end,
7411 EXTENT_DELALLOC |
7412 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
7413 EXTENT_DEFRAG, &cached_state);
7414
7415 spin_lock_irq(&inode->ordered_tree_lock);
7416 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
7417 ordered->truncated_len = min(ordered->truncated_len,
7418 cur - ordered->file_offset);
7419 spin_unlock_irq(&inode->ordered_tree_lock);
7420
7421 /*
7422 * If the ordered extent has finished, we're safe to delete all
7423 * the extent states of the range, otherwise
7424 * btrfs_finish_ordered_io() will get executed by endio for
7425 * other pages, so we can't delete extent states.
7426 */
7427 if (btrfs_dec_test_ordered_pending(inode, &ordered,
7428 cur, range_end + 1 - cur)) {
7429 btrfs_finish_ordered_io(ordered);
7430 /*
7431 * The ordered extent has finished, now we're again
7432 * safe to delete all extent states of the range.
7433 */
7434 extra_flags = EXTENT_CLEAR_ALL_BITS;
7435 }
7436 next:
7437 if (ordered)
7438 btrfs_put_ordered_extent(ordered);
7439 /*
7440 * Qgroup reserved space handler
7441 * Sector(s) here will be either:
7442 *
7443 * 1) Already written to disk or bio already finished
7444 * Then its QGROUP_RESERVED bit in io_tree is already cleared.
7445 * Qgroup will be handled by its qgroup_record then.
7446 * btrfs_qgroup_free_data() call will do nothing here.
7447 *
7448 * 2) Not written to disk yet
7449 * Then btrfs_qgroup_free_data() call will clear the
7450 * QGROUP_RESERVED bit of its io_tree, and free the qgroup
7451 * reserved data space.
7452 * Since the IO will never happen for this page.
7453 */
7454 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
7455 if (!inode_evicting) {
7456 clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
7457 EXTENT_DELALLOC | EXTENT_UPTODATE |
7458 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
7459 extra_flags, &cached_state);
7460 }
7461 cur = range_end + 1;
7462 }
7463 /*
7464 * We have iterated through all ordered extents of the page, the page
7465 * should not have Ordered anymore, or the above iteration
7466 * did something wrong.
7467 */
7468 ASSERT(!folio_test_ordered(folio));
7469 btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
7470 if (!inode_evicting)
7471 __btrfs_release_folio(folio, GFP_NOFS);
7472 clear_folio_extent_mapped(folio);
7473 }
7474
7475 static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
7476 {
7477 struct btrfs_truncate_control control = {
7478 .inode = inode,
7479 .ino = btrfs_ino(inode),
7480 .min_type = BTRFS_EXTENT_DATA_KEY,
7481 .clear_extent_range = true,
7482 };
7483 struct btrfs_root *root = inode->root;
7484 struct btrfs_fs_info *fs_info = root->fs_info;
7485 struct btrfs_block_rsv *rsv;
7486 int ret;
7487 struct btrfs_trans_handle *trans;
7488 u64 mask = fs_info->sectorsize - 1;
7489 const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);
7490
7491 if (!skip_writeback) {
7492 ret = btrfs_wait_ordered_range(inode,
7493 inode->vfs_inode.i_size & (~mask),
7494 (u64)-1);
7495 if (ret)
7496 return ret;
7497 }
7498
7499 /*
7500 * Yes ladies and gentlemen, this is indeed ugly. We have a couple of
7501 * things going on here:
7502 *
7503 * 1) We need to reserve space to update our inode.
7504 *
7505 * 2) We need to have something to cache all the space that is going to
7506 * be free'd up by the truncate operation, but also have some slack
7507 * space reserved in case it uses space during the truncate (thank you
7508 * very much snapshotting).
7509 *
7510 * And we need these to be separate. The fact is we can use a lot of
7511 * space doing the truncate, and we have no earthly idea how much space
7512 * we will use, so we need the truncate reservation to be separate so it
7513 * doesn't end up using space reserved for updating the inode. We also
7514 * need to be able to stop the transaction and start a new one, which
7515 * means we need to be able to update the inode several times, and we
7516 * have no way of knowing how many times that will be, so we can't just
7517 * reserve 1 item for the entirety of the operation, so that has to be
7518 * done separately as well.
7519 *
7520 * So that leaves us with
7521 *
7522 * 1) rsv - for the truncate reservation, which we will steal from the
7523 * transaction reservation.
7524 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
7525 * updating the inode.
7526 */
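/*
 * Sketch of the flow below: start a transaction with 2 metadata units,
 * migrate 1 unit into @rsv for the truncate loop, and leave 1 unit in
 * trans_block_rsv for the inode updates.
 */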
7527 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
7528 if (!rsv)
7529 return -ENOMEM;
7530 rsv->size = min_size;
7531 rsv->failfast = true;
7532
7533 /*
7534 * 1 for the truncate slack space
7535 * 1 for updating the inode.
7536 */
7537 trans = btrfs_start_transaction(root, 2);
7538 if (IS_ERR(trans)) {
7539 ret = PTR_ERR(trans);
7540 goto out;
7541 }
7542
7543 /* Migrate the slack space for the truncate to our reserve */
7544 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
7545 min_size, false);
7546 /*
7547 * We have reserved 2 metadata units when we started the transaction and
7548 * min_size matches 1 unit, so this should never fail, but if it does,
7549 * it's not critical; we just fail the truncation.
7550 */
7551 if (WARN_ON(ret)) {
7552 btrfs_end_transaction(trans);
7553 goto out;
7554 }
7555
7556 trans->block_rsv = rsv;
7557
7558 while (1) {
7559 struct extent_state *cached_state = NULL;
7560 const u64 new_size = inode->vfs_inode.i_size;
7561 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
7562
7563 control.new_size = new_size;
7564 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7565 /*
7566 * We want to drop from the next block forward in case this new
7567 * size is not block aligned since we will be keeping the last
7568 * block of the extent just the way it is.
7569 */
7570 btrfs_drop_extent_map_range(inode,
7571 ALIGN(new_size, fs_info->sectorsize),
7572 (u64)-1, false);
7573
7574 ret = btrfs_truncate_inode_items(trans, root, &control);
7575
7576 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
7577 btrfs_inode_safe_disk_i_size_write(inode, control.last_size);
7578
7579 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
7580
7581 trans->block_rsv = &fs_info->trans_block_rsv;
7582 if (ret != -ENOSPC && ret != -EAGAIN)
7583 break;
7584
7585 ret = btrfs_update_inode(trans, inode);
7586 if (ret)
7587 break;
7588
7589 btrfs_end_transaction(trans);
7590 btrfs_btree_balance_dirty(fs_info);
7591
7592 trans = btrfs_start_transaction(root, 2);
7593 if (IS_ERR(trans)) {
7594 ret = PTR_ERR(trans);
7595 trans = NULL;
7596 break;
7597 }
7598
7599 btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
7600 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
7601 rsv, min_size, false);
7602 /*
7603 * We have reserved 2 metadata units when we started the
7604 * transaction and min_size matches 1 unit, so this should never
7605 * fail, but if it does, it's not critical; we just fail the truncation.
7606 */
7607 if (WARN_ON(ret))
7608 break;
7609
7610 trans->block_rsv = rsv;
7611 }
7612
7613 /*
7614 * We can't call btrfs_truncate_block inside a trans handle as we could
7615 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
7616 * know we've truncated everything except the last little bit, and can
7617 * do btrfs_truncate_block and then update the disk_i_size.
7618 */
7619 if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
7620 btrfs_end_transaction(trans);
7621 btrfs_btree_balance_dirty(fs_info);
7622
7623 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
7624 if (ret)
7625 goto out;
7626 trans = btrfs_start_transaction(root, 1);
7627 if (IS_ERR(trans)) {
7628 ret = PTR_ERR(trans);
7629 goto out;
7630 }
7631 btrfs_inode_safe_disk_i_size_write(inode, 0);
7632 }
7633
7634 if (trans) {
7635 int ret2;
7636
7637 trans->block_rsv = &fs_info->trans_block_rsv;
7638 ret2 = btrfs_update_inode(trans, inode);
7639 if (ret2 && !ret)
7640 ret = ret2;
7641
7642 ret2 = btrfs_end_transaction(trans);
7643 if (ret2 && !ret)
7644 ret = ret2;
7645 btrfs_btree_balance_dirty(fs_info);
7646 }
7647 out:
7648 btrfs_free_block_rsv(fs_info, rsv);
7649 /*
7650 * So if we truncate and then write and fsync we normally would just
7651 * write the extents that changed, which is a problem if we need to
7652 * first truncate that entire inode. So set this flag so we write out
7653 * all of the extents in the inode to the sync log so we're completely
7654 * safe.
7655 *
7656 * If no extents were dropped or trimmed we don't need to force the next
7657 * fsync to truncate all the inode's items from the log and re-log them
7658 * all. This means the truncate operation did not change the file size,
7659 * or changed it to a smaller size but there was only an implicit hole
7660 * between the old i_size and the new i_size, and there were no prealloc
7661 * extents beyond i_size to drop.
7662 */
7663 if (control.extents_found > 0)
7664 btrfs_set_inode_full_sync(inode);
7665
7666 return ret;
7667 }
7668
7669 struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
7670 struct inode *dir)
7671 {
7672 struct inode *inode;
7673
7674 inode = new_inode(dir->i_sb);
7675 if (inode) {
7676 /*
7677 * Subvolumes don't inherit the sgid bit or the parent's gid if
7678 * the parent's sgid bit is set. This is probably a bug.
7679 */
7680 inode_init_owner(idmap, inode, NULL,
7681 S_IFDIR | (~current_umask() & S_IRWXUGO));
7682 inode->i_op = &btrfs_dir_inode_operations;
7683 inode->i_fop = &btrfs_dir_file_operations;
7684 }
7685 return inode;
7686 }
7687
7688 struct inode *btrfs_alloc_inode(struct super_block *sb)
7689 {
7690 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
7691 struct btrfs_inode *ei;
7692 struct inode *inode;
7693
7694 ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
7695 if (!ei)
7696 return NULL;
7697
7698 ei->root = NULL;
7699 ei->generation = 0;
7700 ei->last_trans = 0;
7701 ei->last_sub_trans = 0;
7702 ei->logged_trans = 0;
7703 ei->delalloc_bytes = 0;
7704 ei->new_delalloc_bytes = 0;
7705 ei->defrag_bytes = 0;
7706 ei->disk_i_size = 0;
7707 ei->flags = 0;
7708 ei->ro_flags = 0;
7709 /*
7710 * ->index_cnt will be properly initialized later when creating a new
7711 * inode (btrfs_create_new_inode()) or when reading an existing inode
7712 * from disk (btrfs_read_locked_inode()).
7713 */
7714 ei->csum_bytes = 0;
7715 ei->dir_index = 0;
7716 ei->last_unlink_trans = 0;
7717 ei->last_reflink_trans = 0;
7718 ei->last_log_commit = 0;
7719
7720 spin_lock_init(&ei->lock);
7721 ei->outstanding_extents = 0;
7722 if (sb->s_magic != BTRFS_TEST_MAGIC)
7723 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
7724 BTRFS_BLOCK_RSV_DELALLOC);
7725 ei->runtime_flags = 0;
7726 ei->prop_compress = BTRFS_COMPRESS_NONE;
7727 ei->defrag_compress = BTRFS_COMPRESS_NONE;
7728
7729 ei->delayed_node = NULL;
7730
7731 ei->i_otime_sec = 0;
7732 ei->i_otime_nsec = 0;
7733
7734 inode = &ei->vfs_inode;
7735 extent_map_tree_init(&ei->extent_tree);
7736
7737 /* This io tree sets the valid inode. */
7738 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
7739 ei->io_tree.inode = ei;
7740
7741 ei->file_extent_tree = NULL;
7742
7743 mutex_init(&ei->log_mutex);
7744 spin_lock_init(&ei->ordered_tree_lock);
7745 ei->ordered_tree = RB_ROOT;
7746 ei->ordered_tree_last = NULL;
7747 INIT_LIST_HEAD(&ei->delalloc_inodes);
7748 INIT_LIST_HEAD(&ei->delayed_iput);
7749 init_rwsem(&ei->i_mmap_lock);
7750
7751 return inode;
7752 }
7753
7754 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
7755 void btrfs_test_destroy_inode(struct inode *inode)
7756 {
7757 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
7758 kfree(BTRFS_I(inode)->file_extent_tree);
7759 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7760 }
7761 #endif
7762
7763 void btrfs_free_inode(struct inode *inode)
7764 {
7765 kfree(BTRFS_I(inode)->file_extent_tree);
7766 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
7767 }
7768
7769 void btrfs_destroy_inode(struct inode *vfs_inode)
7770 {
7771 struct btrfs_ordered_extent *ordered;
7772 struct btrfs_inode *inode = BTRFS_I(vfs_inode);
7773 struct btrfs_root *root = inode->root;
7774 bool freespace_inode;
7775
7776 WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
7777 WARN_ON(vfs_inode->i_data.nrpages);
7778 WARN_ON(inode->block_rsv.reserved);
7779 WARN_ON(inode->block_rsv.size);
7780 WARN_ON(inode->outstanding_extents);
7781 if (!S_ISDIR(vfs_inode->i_mode)) {
7782 WARN_ON(inode->delalloc_bytes);
7783 WARN_ON(inode->new_delalloc_bytes);
7784 WARN_ON(inode->csum_bytes);
7785 }
7786 if (!root || !btrfs_is_data_reloc_root(root))
7787 WARN_ON(inode->defrag_bytes);
7788
7789 /*
7790 * This can happen when we create an inode, but somebody else also
7791 * created the same inode and we need to destroy the one we already
7792 * created.
7793 */
7794 if (!root)
7795 return;
7796
7797 /*
7798 * If this is a free space inode do not take the ordered extents lockdep
7799 * map.
7800 */
7801 freespace_inode = btrfs_is_free_space_inode(inode);
7802
7803 while (1) {
7804 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
7805 if (!ordered)
7806 break;
7807 else {
7808 btrfs_err(root->fs_info,
7809 "found ordered extent %llu %llu on inode cleanup",
7810 ordered->file_offset, ordered->num_bytes);
7811
7812 if (!freespace_inode)
7813 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);
7814
7815 btrfs_remove_ordered_extent(inode, ordered);
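/*
 * Put twice: once for the reference taken by the lookup above, and
 * once for the base reference that would normally be dropped on
 * ordered extent completion.
 */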
7816 btrfs_put_ordered_extent(ordered);
7817 btrfs_put_ordered_extent(ordered);
7818 }
7819 }
7820 btrfs_qgroup_check_reserved_leak(inode);
7821 btrfs_del_inode_from_root(inode);
7822 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
7823 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
7824 btrfs_put_root(inode->root);
7825 }
7826
7827 int btrfs_drop_inode(struct inode *inode)
7828 {
7829 struct btrfs_root *root = BTRFS_I(inode)->root;
7830
7831 if (root == NULL)
7832 return 1;
7833
7834 /* The snap/subvol tree is being deleted. */
7835 if (btrfs_root_refs(&root->root_item) == 0)
7836 return 1;
7837 else
7838 return generic_drop_inode(inode);
7839 }
7840
7841 static void init_once(void *foo)
7842 {
7843 struct btrfs_inode *ei = foo;
7844
7845 inode_init_once(&ei->vfs_inode);
7846 }
7847
7848 void __cold btrfs_destroy_cachep(void)
7849 {
7850 /*
7851 * Make sure all delayed rcu free inodes are flushed before we
7852 * destroy cache.
7853 */
7854 rcu_barrier();
7855 kmem_cache_destroy(btrfs_inode_cachep);
7856 }
7857
7858 int __init btrfs_init_cachep(void)
7859 {
7860 btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
7861 sizeof(struct btrfs_inode), 0,
7862 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
7863 init_once);
7864 if (!btrfs_inode_cachep)
7865 return -ENOMEM;
7866
7867 return 0;
7868 }
7869
7870 static int btrfs_getattr(struct mnt_idmap *idmap,
7871 const struct path *path, struct kstat *stat,
7872 u32 request_mask, unsigned int flags)
7873 {
7874 u64 delalloc_bytes;
7875 u64 inode_bytes;
7876 struct inode *inode = d_inode(path->dentry);
7877 u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
7878 u32 bi_flags = BTRFS_I(inode)->flags;
7879 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
7880
7881 stat->result_mask |= STATX_BTIME;
7882 stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
7883 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
7884 if (bi_flags & BTRFS_INODE_APPEND)
7885 stat->attributes |= STATX_ATTR_APPEND;
7886 if (bi_flags & BTRFS_INODE_COMPRESS)
7887 stat->attributes |= STATX_ATTR_COMPRESSED;
7888 if (bi_flags & BTRFS_INODE_IMMUTABLE)
7889 stat->attributes |= STATX_ATTR_IMMUTABLE;
7890 if (bi_flags & BTRFS_INODE_NODUMP)
7891 stat->attributes |= STATX_ATTR_NODUMP;
7892 if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
7893 stat->attributes |= STATX_ATTR_VERITY;
7894
7895 stat->attributes_mask |= (STATX_ATTR_APPEND |
7896 STATX_ATTR_COMPRESSED |
7897 STATX_ATTR_IMMUTABLE |
7898 STATX_ATTR_NODUMP);
7899
7900 generic_fillattr(idmap, request_mask, inode, stat);
7901 stat->dev = BTRFS_I(inode)->root->anon_dev;
7902
7903 stat->subvol = BTRFS_I(inode)->root->root_key.objectid;
7904 stat->result_mask |= STATX_SUBVOL;
7905
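/*
 * new_delalloc_bytes is added to the block count below so that stat()
 * also accounts for buffered writes whose extents have not yet been
 * allocated on disk.
 */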
7906 spin_lock(&BTRFS_I(inode)->lock);
7907 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
7908 inode_bytes = inode_get_bytes(inode);
7909 spin_unlock(&BTRFS_I(inode)->lock);
7910 stat->blocks = (ALIGN(inode_bytes, blocksize) +
7911 ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
7912 return 0;
7913 }
7914
7915 static int btrfs_rename_exchange(struct inode *old_dir,
7916 struct dentry *old_dentry,
7917 struct inode *new_dir,
7918 struct dentry *new_dentry)
7919 {
7920 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
7921 struct btrfs_trans_handle *trans;
7922 unsigned int trans_num_items;
7923 struct btrfs_root *root = BTRFS_I(old_dir)->root;
7924 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
7925 struct inode *new_inode = new_dentry->d_inode;
7926 struct inode *old_inode = old_dentry->d_inode;
7927 struct btrfs_rename_ctx old_rename_ctx;
7928 struct btrfs_rename_ctx new_rename_ctx;
7929 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
7930 u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
7931 u64 old_idx = 0;
7932 u64 new_idx = 0;
7933 int ret;
7934 int ret2;
7935 bool need_abort = false;
7936 struct fscrypt_name old_fname, new_fname;
7937 struct fscrypt_str *old_name, *new_name;
7938
7939 /*
7940 * For non-subvolumes allow exchange only within one subvolume, in the
7941 * same inode namespace. Two subvolumes (represented as directories) can
7942 * be exchanged as they're a logical link and have a fixed inode number.
7943 */
7944 if (root != dest &&
7945 (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
7946 new_ino != BTRFS_FIRST_FREE_OBJECTID))
7947 return -EXDEV;
7948
7949 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
7950 if (ret)
7951 return ret;
7952
7953 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
7954 if (ret) {
7955 fscrypt_free_filename(&old_fname);
7956 return ret;
7957 }
7958
7959 old_name = &old_fname.disk_name;
7960 new_name = &new_fname.disk_name;
7961
7962 /* close the race window with snapshot create/destroy ioctl */
7963 if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
7964 new_ino == BTRFS_FIRST_FREE_OBJECTID)
7965 down_read(&fs_info->subvol_sem);
7966
7967 /*
7968 * For each inode:
7969 * 1 to remove old dir item
7970 * 1 to remove old dir index
7971 * 1 to add new dir item
7972 * 1 to add new dir index
7973 * 1 to update parent inode
7974 *
7975 * If the parents are the same, we only need to account for one
7976 */
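/* I.e. 2 inodes * 5 items = 10, or 9 when both parents are the same. */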
7977 trans_num_items = (old_dir == new_dir ? 9 : 10);
7978 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
7979 /*
7980 * 1 to remove old root ref
7981 * 1 to remove old root backref
7982 * 1 to add new root ref
7983 * 1 to add new root backref
7984 */
7985 trans_num_items += 4;
7986 } else {
7987 /*
7988 * 1 to update inode item
7989 * 1 to remove old inode ref
7990 * 1 to add new inode ref
7991 */
7992 trans_num_items += 3;
7993 }
7994 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
7995 trans_num_items += 4;
7996 else
7997 trans_num_items += 3;
7998 trans = btrfs_start_transaction(root, trans_num_items);
7999 if (IS_ERR(trans)) {
8000 ret = PTR_ERR(trans);
8001 goto out_notrans;
8002 }
8003
8004 if (dest != root) {
8005 ret = btrfs_record_root_in_trans(trans, dest);
8006 if (ret)
8007 goto out_fail;
8008 }
8009
8010 /*
8011 * We need to find a free sequence number both in the source and
8012 * in the destination directory for the exchange.
8013 */
8014 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
8015 if (ret)
8016 goto out_fail;
8017 ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
8018 if (ret)
8019 goto out_fail;
8020
8021 BTRFS_I(old_inode)->dir_index = 0ULL;
8022 BTRFS_I(new_inode)->dir_index = 0ULL;
8023
8024 /* Reference for the source. */
8025 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8026 /* force full log commit if subvolume involved. */
8027 btrfs_set_log_full_commit(trans);
8028 } else {
8029 ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
8030 btrfs_ino(BTRFS_I(new_dir)),
8031 old_idx);
8032 if (ret)
8033 goto out_fail;
8034 need_abort = true;
8035 }
8036
8037 /* And now for the dest. */
8038 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8039 /* force full log commit if subvolume involved. */
8040 btrfs_set_log_full_commit(trans);
8041 } else {
8042 ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
8043 btrfs_ino(BTRFS_I(old_dir)),
8044 new_idx);
8045 if (ret) {
8046 if (need_abort)
8047 btrfs_abort_transaction(trans, ret);
8048 goto out_fail;
8049 }
8050 }
8051
8052 /* Update inode version and ctime/mtime. */
8053 inode_inc_iversion(old_dir);
8054 inode_inc_iversion(new_dir);
8055 inode_inc_iversion(old_inode);
8056 inode_inc_iversion(new_inode);
8057 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8058
8059 if (old_dentry->d_parent != new_dentry->d_parent) {
8060 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8061 BTRFS_I(old_inode), true);
8062 btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
8063 BTRFS_I(new_inode), true);
8064 }
8065
8066 /* src is a subvolume */
8067 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8068 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8069 if (ret) {
8070 btrfs_abort_transaction(trans, ret);
8071 goto out_fail;
8072 }
8073 } else { /* src is an inode */
8074 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8075 BTRFS_I(old_dentry->d_inode),
8076 old_name, &old_rename_ctx);
8077 if (ret) {
8078 btrfs_abort_transaction(trans, ret);
8079 goto out_fail;
8080 }
8081 ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8082 if (ret) {
8083 btrfs_abort_transaction(trans, ret);
8084 goto out_fail;
8085 }
8086 }
8087
8088 /* dest is a subvolume */
8089 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
8090 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8091 if (ret) {
8092 btrfs_abort_transaction(trans, ret);
8093 goto out_fail;
8094 }
8095 } else { /* dest is an inode */
8096 ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8097 BTRFS_I(new_dentry->d_inode),
8098 new_name, &new_rename_ctx);
8099 if (ret) {
8100 btrfs_abort_transaction(trans, ret);
8101 goto out_fail;
8102 }
8103 ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
8104 if (ret) {
8105 btrfs_abort_transaction(trans, ret);
8106 goto out_fail;
8107 }
8108 }
8109
8110 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8111 new_name, 0, old_idx);
8112 if (ret) {
8113 btrfs_abort_transaction(trans, ret);
8114 goto out_fail;
8115 }
8116
8117 ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
8118 old_name, 0, new_idx);
8119 if (ret) {
8120 btrfs_abort_transaction(trans, ret);
8121 goto out_fail;
8122 }
8123
8124 if (old_inode->i_nlink == 1)
8125 BTRFS_I(old_inode)->dir_index = old_idx;
8126 if (new_inode->i_nlink == 1)
8127 BTRFS_I(new_inode)->dir_index = new_idx;
8128
8129 /*
8130 * Now pin the logs of the roots. We do it to ensure that no other task
8131 * can sync the logs while we are in progress with the rename, because
8132 * that could result in an inconsistency in case any of the inodes that
8133 * are part of this rename operation were logged before.
8134 */
8135 if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8136 btrfs_pin_log_trans(root);
8137 if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8138 btrfs_pin_log_trans(dest);
8139
8140 /* Do the log updates for all inodes. */
8141 if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8142 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8143 old_rename_ctx.index, new_dentry->d_parent);
8144 if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8145 btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
8146 new_rename_ctx.index, old_dentry->d_parent);
8147
8148 /* Now unpin the logs. */
8149 if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8150 btrfs_end_log_trans(root);
8151 if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
8152 btrfs_end_log_trans(dest);
8153 out_fail:
8154 ret2 = btrfs_end_transaction(trans);
8155 ret = ret ? ret : ret2;
8156 out_notrans:
8157 if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
8158 old_ino == BTRFS_FIRST_FREE_OBJECTID)
8159 up_read(&fs_info->subvol_sem);
8160
8161 fscrypt_free_filename(&new_fname);
8162 fscrypt_free_filename(&old_fname);
8163 return ret;
8164 }
8165
8166 static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
8167 struct inode *dir)
8168 {
8169 struct inode *inode;
8170
8171 inode = new_inode(dir->i_sb);
8172 if (inode) {
8173 inode_init_owner(idmap, inode, dir,
8174 S_IFCHR | WHITEOUT_MODE);
8175 inode->i_op = &btrfs_special_inode_operations;
8176 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
8177 }
8178 return inode;
8179 }
8180
8181 static int btrfs_rename(struct mnt_idmap *idmap,
8182 struct inode *old_dir, struct dentry *old_dentry,
8183 struct inode *new_dir, struct dentry *new_dentry,
8184 unsigned int flags)
8185 {
8186 struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
8187 struct btrfs_new_inode_args whiteout_args = {
8188 .dir = old_dir,
8189 .dentry = old_dentry,
8190 };
8191 struct btrfs_trans_handle *trans;
8192 unsigned int trans_num_items;
8193 struct btrfs_root *root = BTRFS_I(old_dir)->root;
8194 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
8195 struct inode *new_inode = d_inode(new_dentry);
8196 struct inode *old_inode = d_inode(old_dentry);
8197 struct btrfs_rename_ctx rename_ctx;
8198 u64 index = 0;
8199 int ret;
8200 int ret2;
8201 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
8202 struct fscrypt_name old_fname, new_fname;
8203
8204 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
8205 return -EPERM;
8206
8207 /* We only allow renaming subvolume links between subvolumes. */
8208 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
8209 return -EXDEV;
8210
8211 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
8212 (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
8213 return -ENOTEMPTY;
8214
8215 if (S_ISDIR(old_inode->i_mode) && new_inode &&
8216 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
8217 return -ENOTEMPTY;
8218
8219 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
8220 if (ret)
8221 return ret;
8222
8223 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
8224 if (ret) {
8225 fscrypt_free_filename(&old_fname);
8226 return ret;
8227 }
8228
8229 /* check for collisions, even if the name isn't there */
8230 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
8231 if (ret) {
8232 if (ret == -EEXIST) {
8233 /* We shouldn't get -EEXIST without a new_inode. */
8235 if (WARN_ON(!new_inode)) {
8236 goto out_fscrypt_names;
8237 }
8238 } else {
8239 /* maybe -EOVERFLOW */
8240 goto out_fscrypt_names;
8241 }
8242 }
8243 ret = 0;
8244
8245 /*
8246 * we're using rename to replace one file with another. Start IO on it
8247 * now so we don't add too much work to the end of the transaction
8248 */
8249 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
8250 filemap_flush(old_inode->i_mapping);
8251
8252 if (flags & RENAME_WHITEOUT) {
8253 whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
8254 if (!whiteout_args.inode) {
8255 ret = -ENOMEM;
8256 goto out_fscrypt_names;
8257 }
8258 ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
8259 if (ret)
8260 goto out_whiteout_inode;
8261 } else {
8262 /* 1 to update the old parent inode. */
8263 trans_num_items = 1;
8264 }
8265
8266 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
8267 /* Close the race window with snapshot create/destroy ioctl */
8268 down_read(&fs_info->subvol_sem);
8269 /*
8270 * 1 to remove old root ref
8271 * 1 to remove old root backref
8272 * 1 to add new root ref
8273 * 1 to add new root backref
8274 */
8275 trans_num_items += 4;
8276 } else {
8277 /*
8278 * 1 to update inode
8279 * 1 to remove old inode ref
8280 * 1 to add new inode ref
8281 */
8282 trans_num_items += 3;
8283 }
8284 /*
8285 * 1 to remove old dir item
8286 * 1 to remove old dir index
8287 * 1 to add new dir item
8288 * 1 to add new dir index
8289 */
8290 trans_num_items += 4;
8291 /* 1 to update new parent inode if it's not the same as the old parent */
8292 if (new_dir != old_dir)
8293 trans_num_items++;
8294 if (new_inode) {
8295 /*
8296 * 1 to update inode
8297 * 1 to remove inode ref
8298 * 1 to remove dir item
8299 * 1 to remove dir index
8300 * 1 to possibly add orphan item
8301 */
8302 trans_num_items += 5;
8303 }
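/*
 * Hypothetical worst case: a whiteout rename across directories that
 * replaces an existing inode needs all of the above plus the items
 * reserved by btrfs_new_inode_prepare() for the whiteout inode.
 */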
8304 trans = btrfs_start_transaction(root, trans_num_items);
8305 if (IS_ERR(trans)) {
8306 ret = PTR_ERR(trans);
8307 goto out_notrans;
8308 }
8309
8310 if (dest != root) {
8311 ret = btrfs_record_root_in_trans(trans, dest);
8312 if (ret)
8313 goto out_fail;
8314 }
8315
8316 ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
8317 if (ret)
8318 goto out_fail;
8319
8320 BTRFS_I(old_inode)->dir_index = 0ULL;
8321 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8322 /* force full log commit if subvolume involved. */
8323 btrfs_set_log_full_commit(trans);
8324 } else {
8325 ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
8326 old_ino, btrfs_ino(BTRFS_I(new_dir)),
8327 index);
8328 if (ret)
8329 goto out_fail;
8330 }
8331
8332 inode_inc_iversion(old_dir);
8333 inode_inc_iversion(new_dir);
8334 inode_inc_iversion(old_inode);
8335 simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
8336
8337 if (old_dentry->d_parent != new_dentry->d_parent)
8338 btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
8339 BTRFS_I(old_inode), true);
8340
8341 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
8342 ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
8343 if (ret) {
8344 btrfs_abort_transaction(trans, ret);
8345 goto out_fail;
8346 }
8347 } else {
8348 ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
8349 BTRFS_I(d_inode(old_dentry)),
8350 &old_fname.disk_name, &rename_ctx);
8351 if (ret) {
8352 btrfs_abort_transaction(trans, ret);
8353 goto out_fail;
8354 }
8355 ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
8356 if (ret) {
8357 btrfs_abort_transaction(trans, ret);
8358 goto out_fail;
8359 }
8360 }
8361
8362 if (new_inode) {
8363 inode_inc_iversion(new_inode);
8364 if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
8365 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
8366 ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
8367 if (ret) {
8368 btrfs_abort_transaction(trans, ret);
8369 goto out_fail;
8370 }
8371 BUG_ON(new_inode->i_nlink == 0);
8372 } else {
8373 ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
8374 BTRFS_I(d_inode(new_dentry)),
8375 &new_fname.disk_name);
8376 if (ret) {
8377 btrfs_abort_transaction(trans, ret);
8378 goto out_fail;
8379 }
8380 }
8381 if (new_inode->i_nlink == 0) {
8382 ret = btrfs_orphan_add(trans,
8383 BTRFS_I(d_inode(new_dentry)));
8384 if (ret) {
8385 btrfs_abort_transaction(trans, ret);
8386 goto out_fail;
8387 }
8388 }
8389 }
8390
8391 ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
8392 &new_fname.disk_name, 0, index);
8393 if (ret) {
8394 btrfs_abort_transaction(trans, ret);
8395 goto out_fail;
8396 }
8397
8398 if (old_inode->i_nlink == 1)
8399 BTRFS_I(old_inode)->dir_index = index;
8400
8401 if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
8402 btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
8403 rename_ctx.index, new_dentry->d_parent);
8404
8405 if (flags & RENAME_WHITEOUT) {
8406 ret = btrfs_create_new_inode(trans, &whiteout_args);
8407 if (ret) {
8408 btrfs_abort_transaction(trans, ret);
8409 goto out_fail;
8410 } else {
8411 unlock_new_inode(whiteout_args.inode);
8412 iput(whiteout_args.inode);
8413 whiteout_args.inode = NULL;
8414 }
8415 }
8416 out_fail:
8417 ret2 = btrfs_end_transaction(trans);
8418 ret = ret ? ret : ret2;
8419 out_notrans:
8420 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
8421 up_read(&fs_info->subvol_sem);
8422 if (flags & RENAME_WHITEOUT)
8423 btrfs_new_inode_args_destroy(&whiteout_args);
8424 out_whiteout_inode:
8425 if (flags & RENAME_WHITEOUT)
8426 iput(whiteout_args.inode);
8427 out_fscrypt_names:
8428 fscrypt_free_filename(&old_fname);
8429 fscrypt_free_filename(&new_fname);
8430 return ret;
8431 }
8432
8433 static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
8434 struct dentry *old_dentry, struct inode *new_dir,
8435 struct dentry *new_dentry, unsigned int flags)
8436 {
8437 int ret;
8438
8439 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
8440 return -EINVAL;
8441
8442 if (flags & RENAME_EXCHANGE)
8443 ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
8444 new_dentry);
8445 else
8446 ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
8447 new_dentry, flags);
8448
8449 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);
8450
8451 return ret;
8452 }
8453
8454 struct btrfs_delalloc_work {
8455 struct inode *inode;
8456 struct completion completion;
8457 struct list_head list;
8458 struct btrfs_work work;
8459 };
8460
8461 static void btrfs_run_delalloc_work(struct btrfs_work *work)
8462 {
8463 struct btrfs_delalloc_work *delalloc_work;
8464 struct inode *inode;
8465
8466 delalloc_work = container_of(work, struct btrfs_delalloc_work,
8467 work);
8468 inode = delalloc_work->inode;
8469 filemap_flush(inode->i_mapping);
8470 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8471 &BTRFS_I(inode)->runtime_flags))
8472 filemap_flush(inode->i_mapping);
8473
8474 iput(inode);
8475 complete(&delalloc_work->completion);
8476 }
8477
8478 static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
8479 {
8480 struct btrfs_delalloc_work *work;
8481
8482 work = kmalloc(sizeof(*work), GFP_NOFS);
8483 if (!work)
8484 return NULL;
8485
8486 init_completion(&work->completion);
8487 INIT_LIST_HEAD(&work->list);
8488 work->inode = inode;
8489 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
8490
8491 return work;
8492 }
8493
8494 /*
8495 * Some fairly slow code that needs optimization. This walks the list
8496 * of all the inodes with pending delalloc and forces them to disk.
8497 */
8498 static int start_delalloc_inodes(struct btrfs_root *root,
8499 struct writeback_control *wbc, bool snapshot,
8500 bool in_reclaim_context)
8501 {
8502 struct btrfs_inode *binode;
8503 struct inode *inode;
8504 struct btrfs_delalloc_work *work, *next;
8505 LIST_HEAD(works);
8506 LIST_HEAD(splice);
8507 int ret = 0;
8508 bool full_flush = wbc->nr_to_write == LONG_MAX;
8509
8510 mutex_lock(&root->delalloc_mutex);
8511 spin_lock(&root->delalloc_lock);
8512 list_splice_init(&root->delalloc_inodes, &splice);
8513 while (!list_empty(&splice)) {
8514 binode = list_entry(splice.next, struct btrfs_inode,
8515 delalloc_inodes);
8516
8517 list_move_tail(&binode->delalloc_inodes,
8518 &root->delalloc_inodes);
8519
8520 if (in_reclaim_context &&
8521 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
8522 continue;
8523
8524 inode = igrab(&binode->vfs_inode);
8525 if (!inode) {
8526 cond_resched_lock(&root->delalloc_lock);
8527 continue;
8528 }
8529 spin_unlock(&root->delalloc_lock);
8530
8531 if (snapshot)
8532 set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
8533 &binode->runtime_flags);
8534 if (full_flush) {
8535 work = btrfs_alloc_delalloc_work(inode);
8536 if (!work) {
8537 iput(inode);
8538 ret = -ENOMEM;
8539 goto out;
8540 }
8541 list_add_tail(&work->list, &works);
8542 btrfs_queue_work(root->fs_info->flush_workers,
8543 &work->work);
8544 } else {
8545 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
8546 btrfs_add_delayed_iput(BTRFS_I(inode));
8547 if (ret || wbc->nr_to_write <= 0)
8548 goto out;
8549 }
8550 cond_resched();
8551 spin_lock(&root->delalloc_lock);
8552 }
8553 spin_unlock(&root->delalloc_lock);
8554
8555 out:
8556 list_for_each_entry_safe(work, next, &works, list) {
8557 list_del_init(&work->list);
8558 wait_for_completion(&work->completion);
8559 kfree(work);
8560 }
8561
8562 if (!list_empty(&splice)) {
8563 spin_lock(&root->delalloc_lock);
8564 list_splice_tail(&splice, &root->delalloc_inodes);
8565 spin_unlock(&root->delalloc_lock);
8566 }
8567 mutex_unlock(&root->delalloc_mutex);
8568 return ret;
8569 }
8570
8571 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
8572 {
8573 struct writeback_control wbc = {
8574 .nr_to_write = LONG_MAX,
8575 .sync_mode = WB_SYNC_NONE,
8576 .range_start = 0,
8577 .range_end = LLONG_MAX,
8578 };
8579 struct btrfs_fs_info *fs_info = root->fs_info;
8580
8581 if (BTRFS_FS_ERROR(fs_info))
8582 return -EROFS;
8583
8584 return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
8585 }
8586
8587 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
8588 bool in_reclaim_context)
8589 {
8590 struct writeback_control wbc = {
8591 .nr_to_write = nr,
8592 .sync_mode = WB_SYNC_NONE,
8593 .range_start = 0,
8594 .range_end = LLONG_MAX,
8595 };
8596 struct btrfs_root *root;
8597 LIST_HEAD(splice);
8598 int ret;
8599
8600 if (BTRFS_FS_ERROR(fs_info))
8601 return -EROFS;
8602
8603 mutex_lock(&fs_info->delalloc_root_mutex);
8604 spin_lock(&fs_info->delalloc_root_lock);
8605 list_splice_init(&fs_info->delalloc_roots, &splice);
8606 while (!list_empty(&splice)) {
8607 /*
8608 * Reset nr_to_write here so we know that we're doing a full
8609 * flush.
8610 */
8611 if (nr == LONG_MAX)
8612 wbc.nr_to_write = LONG_MAX;
8613
8614 root = list_first_entry(&splice, struct btrfs_root,
8615 delalloc_root);
8616 root = btrfs_grab_root(root);
8617 BUG_ON(!root);
8618 list_move_tail(&root->delalloc_root,
8619 &fs_info->delalloc_roots);
8620 spin_unlock(&fs_info->delalloc_root_lock);
8621
8622 ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
8623 btrfs_put_root(root);
8624 if (ret < 0 || wbc.nr_to_write <= 0)
8625 goto out;
8626 spin_lock(&fs_info->delalloc_root_lock);
8627 }
8628 spin_unlock(&fs_info->delalloc_root_lock);
8629
8630 ret = 0;
8631 out:
8632 if (!list_empty(&splice)) {
8633 spin_lock(&fs_info->delalloc_root_lock);
8634 list_splice_tail(&splice, &fs_info->delalloc_roots);
8635 spin_unlock(&fs_info->delalloc_root_lock);
8636 }
8637 mutex_unlock(&fs_info->delalloc_root_mutex);
8638 return ret;
8639 }
8640
8641 static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
8642 struct dentry *dentry, const char *symname)
8643 {
8644 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
8645 struct btrfs_trans_handle *trans;
8646 struct btrfs_root *root = BTRFS_I(dir)->root;
8647 struct btrfs_path *path;
8648 struct btrfs_key key;
8649 struct inode *inode;
8650 struct btrfs_new_inode_args new_inode_args = {
8651 .dir = dir,
8652 .dentry = dentry,
8653 };
8654 unsigned int trans_num_items;
8655 int err;
8656 int name_len;
8657 int datasize;
8658 unsigned long ptr;
8659 struct btrfs_file_extent_item *ei;
8660 struct extent_buffer *leaf;
8661
8662 name_len = strlen(symname);
8663 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
8664 return -ENAMETOOLONG;
8665
8666 inode = new_inode(dir->i_sb);
8667 if (!inode)
8668 return -ENOMEM;
8669 inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
8670 inode->i_op = &btrfs_symlink_inode_operations;
8671 inode_nohighmem(inode);
8672 inode->i_mapping->a_ops = &btrfs_aops;
8673 btrfs_i_size_write(BTRFS_I(inode), name_len);
8674 inode_set_bytes(inode, name_len);
8675
8676 new_inode_args.inode = inode;
8677 err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
8678 if (err)
8679 goto out_inode;
8680 /* 1 additional item for the inline extent */
8681 trans_num_items++;
8682
8683 trans = btrfs_start_transaction(root, trans_num_items);
8684 if (IS_ERR(trans)) {
8685 err = PTR_ERR(trans);
8686 goto out_new_inode_args;
8687 }
8688
8689 err = btrfs_create_new_inode(trans, &new_inode_args);
8690 if (err)
8691 goto out;
8692
8693 path = btrfs_alloc_path();
8694 if (!path) {
8695 err = -ENOMEM;
8696 btrfs_abort_transaction(trans, err);
8697 discard_new_inode(inode);
8698 inode = NULL;
8699 goto out;
8700 }
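/*
 * Store the symlink target as an inline extent: a single EXTENT_DATA item
 * at file offset 0, with the target bytes placed right after the
 * btrfs_file_extent_item header.
 */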
8701 key.objectid = btrfs_ino(BTRFS_I(inode));
8702 key.offset = 0;
8703 key.type = BTRFS_EXTENT_DATA_KEY;
8704 datasize = btrfs_file_extent_calc_inline_size(name_len);
8705 err = btrfs_insert_empty_item(trans, root, path, &key,
8706 datasize);
8707 if (err) {
8708 btrfs_abort_transaction(trans, err);
8709 btrfs_free_path(path);
8710 discard_new_inode(inode);
8711 inode = NULL;
8712 goto out;
8713 }
8714 leaf = path->nodes[0];
8715 ei = btrfs_item_ptr(leaf, path->slots[0],
8716 struct btrfs_file_extent_item);
8717 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
8718 btrfs_set_file_extent_type(leaf, ei,
8719 BTRFS_FILE_EXTENT_INLINE);
8720 btrfs_set_file_extent_encryption(leaf, ei, 0);
8721 btrfs_set_file_extent_compression(leaf, ei, 0);
8722 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
8723 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
8724
8725 ptr = btrfs_file_extent_inline_start(ei);
8726 write_extent_buffer(leaf, symname, ptr, name_len);
8727 btrfs_free_path(path);
8728
8729 d_instantiate_new(dentry, inode);
8730 err = 0;
8731 out:
8732 btrfs_end_transaction(trans);
8733 btrfs_btree_balance_dirty(fs_info);
8734 out_new_inode_args:
8735 btrfs_new_inode_args_destroy(&new_inode_args);
8736 out_inode:
8737 if (err)
8738 iput(inode);
8739 return err;
8740 }
8741
8742 static struct btrfs_trans_handle *insert_prealloc_file_extent(
8743 struct btrfs_trans_handle *trans_in,
8744 struct btrfs_inode *inode,
8745 struct btrfs_key *ins,
8746 u64 file_offset)
8747 {
8748 struct btrfs_file_extent_item stack_fi;
8749 struct btrfs_replace_extent_info extent_info;
8750 struct btrfs_trans_handle *trans = trans_in;
8751 struct btrfs_path *path;
8752 u64 start = ins->objectid;
8753 u64 len = ins->offset;
8754 u64 qgroup_released = 0;
8755 int ret;
8756
8757 memset(&stack_fi, 0, sizeof(stack_fi));
8758
8759 btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
8760 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
8761 btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
8762 btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
8763 btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
8764 btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
8765 /* Encryption and other encoding is reserved and all 0 */
8766
8767 ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
8768 if (ret < 0)
8769 return ERR_PTR(ret);
8770
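/*
 * Two ways to insert the extent item: if the caller passed in a transaction
 * handle, insert the item directly; otherwise let
 * btrfs_replace_file_extents() start a transaction for us, which also drops
 * any existing extents in the target range.
 */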
8771 if (trans) {
8772 ret = insert_reserved_file_extent(trans, inode,
8773 file_offset, &stack_fi,
8774 true, qgroup_released);
8775 if (ret)
8776 goto free_qgroup;
8777 return trans;
8778 }
8779
8780 extent_info.disk_offset = start;
8781 extent_info.disk_len = len;
8782 extent_info.data_offset = 0;
8783 extent_info.data_len = len;
8784 extent_info.file_offset = file_offset;
8785 extent_info.extent_buf = (char *)&stack_fi;
8786 extent_info.is_new_extent = true;
8787 extent_info.update_times = true;
8788 extent_info.qgroup_reserved = qgroup_released;
8789 extent_info.insertions = 0;
8790
8791 path = btrfs_alloc_path();
8792 if (!path) {
8793 ret = -ENOMEM;
8794 goto free_qgroup;
8795 }
8796
8797 ret = btrfs_replace_file_extents(inode, path, file_offset,
8798 file_offset + len - 1, &extent_info,
8799 &trans);
8800 btrfs_free_path(path);
8801 if (ret)
8802 goto free_qgroup;
8803 return trans;
8804
8805 free_qgroup:
8806 /*
8807 * We released the qgroup data range at the beginning of the function,
8808 * and normally the qgroup_released bytes will be freed when the
8809 * transaction commits.
8810 * But if we error out early, we have to free what we released, or we
8811 * leak the qgroup data reservation.
8812 */
8813 btrfs_qgroup_free_refroot(inode->root->fs_info,
8814 btrfs_root_id(inode->root), qgroup_released,
8815 BTRFS_QGROUP_RSV_DATA);
8816 return ERR_PTR(ret);
8817 }
8818
8819 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
8820 u64 start, u64 num_bytes, u64 min_size,
8821 loff_t actual_len, u64 *alloc_hint,
8822 struct btrfs_trans_handle *trans)
8823 {
8824 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
8825 struct extent_map *em;
8826 struct btrfs_root *root = BTRFS_I(inode)->root;
8827 struct btrfs_key ins;
8828 u64 cur_offset = start;
8829 u64 clear_offset = start;
8830 u64 i_size;
8831 u64 cur_bytes;
8832 u64 last_alloc = (u64)-1;
8833 int ret = 0;
8834 bool own_trans = true;
8835 u64 end = start + num_bytes - 1;
8836
8837 if (trans)
8838 own_trans = false;
8839 while (num_bytes > 0) {
8840 cur_bytes = min_t(u64, num_bytes, SZ_256M);
8841 cur_bytes = max(cur_bytes, min_size);
8842 /*
8843 * If we are severely fragmented we could end up with really
8844 * small allocations, so if the allocator is returning small
8845 * chunks let's make its job easier by only searching for those
8846 * sized chunks.
8847 */
8848 cur_bytes = min(cur_bytes, last_alloc);
8849 ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
8850 min_size, 0, *alloc_hint, &ins, 1, 0);
8851 if (ret)
8852 break;
8853
8854 /*
8855 * We've reserved this space, and thus converted it from
8856 * ->bytes_may_use to ->bytes_reserved. Any error that happens
8857 * from here on out we will only need to clear our reservation
8858 * for the remaining unreserved area, so advance our
8859 * clear_offset by our extent size.
8860 */
8861 clear_offset += ins.offset;
8862
8863 last_alloc = ins.offset;
8864 trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
8865 &ins, cur_offset);
8866 /*
8867 * Now that we inserted the prealloc extent we can finally
8868 * decrement the number of reservations in the block group.
8869 * If we did it before, we could race with relocation and have
8870 * relocation miss the reserved extent, making it fail later.
8871 */
8872 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
8873 if (IS_ERR(trans)) {
8874 ret = PTR_ERR(trans);
8875 btrfs_free_reserved_extent(fs_info, ins.objectid,
8876 ins.offset, 0);
8877 break;
8878 }
8879
8880 em = alloc_extent_map();
8881 if (!em) {
8882 btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
8883 cur_offset + ins.offset - 1, false);
8884 btrfs_set_inode_full_sync(BTRFS_I(inode));
8885 goto next;
8886 }
8887
8888 em->start = cur_offset;
8889 em->len = ins.offset;
8890 em->disk_bytenr = ins.objectid;
8891 em->offset = 0;
8892 em->disk_num_bytes = ins.offset;
8893 em->ram_bytes = ins.offset;
8894 em->flags |= EXTENT_FLAG_PREALLOC;
8895 em->generation = trans->transid;
8896
8897 ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
8898 free_extent_map(em);
8899 next:
8900 num_bytes -= ins.offset;
8901 cur_offset += ins.offset;
8902 *alloc_hint = ins.objectid + ins.offset;
8903
8904 inode_inc_iversion(inode);
8905 inode_set_ctime_current(inode);
8906 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
8907 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
8908 (actual_len > inode->i_size) &&
8909 (cur_offset > inode->i_size)) {
8910 if (cur_offset > actual_len)
8911 i_size = actual_len;
8912 else
8913 i_size = cur_offset;
8914 i_size_write(inode, i_size);
8915 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
8916 }
8917
8918 ret = btrfs_update_inode(trans, BTRFS_I(inode));
8919
8920 if (ret) {
8921 btrfs_abort_transaction(trans, ret);
8922 if (own_trans)
8923 btrfs_end_transaction(trans);
8924 break;
8925 }
8926
8927 if (own_trans) {
8928 btrfs_end_transaction(trans);
8929 trans = NULL;
8930 }
8931 }
8932 if (clear_offset < end)
8933 btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
8934 end - clear_offset + 1);
8935 return ret;
8936 }
8937
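/*
 * A minimal, illustrative call (hypothetical caller; assumes the inode is
 * properly locked and the data space for the range was already reserved):
 *
 *	u64 alloc_hint = 0;
 *
 *	ret = btrfs_prealloc_file_range(inode, 0, 0, SZ_1M,
 *					fs_info->sectorsize, SZ_1M,
 *					&alloc_hint);
 *
 * This allocates PREALLOC extents for the first 1MiB of the file and, since
 * FALLOC_FL_KEEP_SIZE is not set in the mode argument, extends i_size to 1MiB.
 */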
8938 int btrfs_prealloc_file_range(struct inode *inode, int mode,
8939 u64 start, u64 num_bytes, u64 min_size,
8940 loff_t actual_len, u64 *alloc_hint)
8941 {
8942 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8943 min_size, actual_len, alloc_hint,
8944 NULL);
8945 }
8946
8947 int btrfs_prealloc_file_range_trans(struct inode *inode,
8948 struct btrfs_trans_handle *trans, int mode,
8949 u64 start, u64 num_bytes, u64 min_size,
8950 loff_t actual_len, u64 *alloc_hint)
8951 {
8952 return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
8953 min_size, actual_len, alloc_hint, trans);
8954 }
8955
8956 static int btrfs_permission(struct mnt_idmap *idmap,
8957 struct inode *inode, int mask)
8958 {
8959 struct btrfs_root *root = BTRFS_I(inode)->root;
8960 umode_t mode = inode->i_mode;
8961
8962 if (mask & MAY_WRITE &&
8963 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
8964 if (btrfs_root_readonly(root))
8965 return -EROFS;
8966 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
8967 return -EACCES;
8968 }
8969 return generic_permission(idmap, inode, mask);
8970 }
8971
8972 static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
8973 struct file *file, umode_t mode)
8974 {
8975 struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
8976 struct btrfs_trans_handle *trans;
8977 struct btrfs_root *root = BTRFS_I(dir)->root;
8978 struct inode *inode;
8979 struct btrfs_new_inode_args new_inode_args = {
8980 .dir = dir,
8981 .dentry = file->f_path.dentry,
8982 .orphan = true,
8983 };
8984 unsigned int trans_num_items;
8985 int ret;
8986
8987 inode = new_inode(dir->i_sb);
8988 if (!inode)
8989 return -ENOMEM;
8990 inode_init_owner(idmap, inode, dir, mode);
8991 inode->i_fop = &btrfs_file_operations;
8992 inode->i_op = &btrfs_file_inode_operations;
8993 inode->i_mapping->a_ops = &btrfs_aops;
8994
8995 new_inode_args.inode = inode;
8996 ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
8997 if (ret)
8998 goto out_inode;
8999
9000 trans = btrfs_start_transaction(root, trans_num_items);
9001 if (IS_ERR(trans)) {
9002 ret = PTR_ERR(trans);
9003 goto out_new_inode_args;
9004 }
9005
9006 ret = btrfs_create_new_inode(trans, &new_inode_args);
9007
9008 /*
9009 * We set number of links to 0 in btrfs_create_new_inode(), and here we
9010 * set it to 1 because d_tmpfile() will issue a warning if the count is
9011 * 0, through:
9012 *
9013 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9014 */
9015 set_nlink(inode, 1);
9016
9017 if (!ret) {
9018 d_tmpfile(file, inode);
9019 unlock_new_inode(inode);
9020 mark_inode_dirty(inode);
9021 }
9022
9023 btrfs_end_transaction(trans);
9024 btrfs_btree_balance_dirty(fs_info);
9025 out_new_inode_args:
9026 btrfs_new_inode_args_destroy(&new_inode_args);
9027 out_inode:
9028 if (ret)
9029 iput(inode);
9030 return finish_open_simple(file, ret);
9031 }
9032
9033 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
9034 int compress_type)
9035 {
9036 switch (compress_type) {
9037 case BTRFS_COMPRESS_NONE:
9038 return BTRFS_ENCODED_IO_COMPRESSION_NONE;
9039 case BTRFS_COMPRESS_ZLIB:
9040 return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
9041 case BTRFS_COMPRESS_LZO:
9042 /*
9043 * The LZO format depends on the sector size. 64K is the maximum
9044 * sector size that we support.
9045 */
9046 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
9047 return -EINVAL;
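/*
 * Map the sector size onto the matching LZO constant: sectorsize_bits == 12
 * (4K sectors) yields ..._LZO_4K, up to sectorsize_bits == 16 (64K), which
 * yields ..._LZO_64K.
 */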
9048 return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
9049 (fs_info->sectorsize_bits - 12);
9050 case BTRFS_COMPRESS_ZSTD:
9051 return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
9052 default:
9053 return -EUCLEAN;
9054 }
9055 }
9056
9057 static ssize_t btrfs_encoded_read_inline(
9058 struct kiocb *iocb,
9059 struct iov_iter *iter, u64 start,
9060 u64 lockend,
9061 struct extent_state **cached_state,
9062 u64 extent_start, size_t count,
9063 struct btrfs_ioctl_encoded_io_args *encoded,
9064 bool *unlocked)
9065 {
9066 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9067 struct btrfs_root *root = inode->root;
9068 struct btrfs_fs_info *fs_info = root->fs_info;
9069 struct extent_io_tree *io_tree = &inode->io_tree;
9070 struct btrfs_path *path;
9071 struct extent_buffer *leaf;
9072 struct btrfs_file_extent_item *item;
9073 u64 ram_bytes;
9074 unsigned long ptr;
9075 void *tmp;
9076 ssize_t ret;
9077 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
9078
9079 path = btrfs_alloc_path();
9080 if (!path) {
9081 ret = -ENOMEM;
9082 goto out;
9083 }
9084
9085 path->nowait = nowait;
9086
9087 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
9088 extent_start, 0);
9089 if (ret) {
9090 if (ret > 0) {
9091 /* The extent item disappeared? */
9092 ret = -EIO;
9093 }
9094 goto out;
9095 }
9096 leaf = path->nodes[0];
9097 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
9098
9099 ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
9100 ptr = btrfs_file_extent_inline_start(item);
9101
9102 encoded->len = min_t(u64, extent_start + ram_bytes,
9103 inode->vfs_inode.i_size) - iocb->ki_pos;
9104 ret = btrfs_encoded_io_compression_from_extent(fs_info,
9105 btrfs_file_extent_compression(leaf, item));
9106 if (ret < 0)
9107 goto out;
9108 encoded->compression = ret;
9109 if (encoded->compression) {
9110 size_t inline_size;
9111
9112 inline_size = btrfs_file_extent_inline_item_len(leaf,
9113 path->slots[0]);
9114 if (inline_size > count) {
9115 ret = -ENOBUFS;
9116 goto out;
9117 }
9118 count = inline_size;
9119 encoded->unencoded_len = ram_bytes;
9120 encoded->unencoded_offset = iocb->ki_pos - extent_start;
9121 } else {
9122 count = min_t(u64, count, encoded->len);
9123 encoded->len = count;
9124 encoded->unencoded_len = count;
9125 ptr += iocb->ki_pos - extent_start;
9126 }
9127
9128 tmp = kmalloc(count, GFP_NOFS);
9129 if (!tmp) {
9130 ret = -ENOMEM;
9131 goto out;
9132 }
9133 read_extent_buffer(leaf, tmp, ptr, count);
9134 btrfs_release_path(path);
9135 unlock_extent(io_tree, start, lockend, cached_state);
9136 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9137 *unlocked = true;
9138
9139 ret = copy_to_iter(tmp, count, iter);
9140 if (ret != count)
9141 ret = -EFAULT;
9142 kfree(tmp);
9143 out:
9144 btrfs_free_path(path);
9145 return ret;
9146 }
9147
9148 struct btrfs_encoded_read_private {
9149 struct completion done;
9150 void *uring_ctx;
9151 refcount_t pending_refs;
9152 blk_status_t status;
9153 };
9154
9155 static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
9156 {
9157 struct btrfs_encoded_read_private *priv = bbio->private;
9158
9159 if (bbio->bio.bi_status) {
9160 /*
9161 * The memory barrier implied by the refcount_dec_and_test() here
9162 * pairs with the memory barrier implied by the
9163 * refcount_dec_and_test() or wait_for_completion_io() in
9164 * btrfs_encoded_read_regular_fill_pages() to ensure that this
9165 * write is observed before the load of status in
9166 * btrfs_encoded_read_regular_fill_pages().
9167 */
9168 WRITE_ONCE(priv->status, bbio->bio.bi_status);
9169 }
9170 if (refcount_dec_and_test(&priv->pending_refs)) {
9171 int err = blk_status_to_errno(READ_ONCE(priv->status));
9172
9173 if (priv->uring_ctx) {
9174 btrfs_uring_read_extent_endio(priv->uring_ctx, err);
9175 kfree(priv);
9176 } else {
9177 complete(&priv->done);
9178 }
9179 }
9180 bio_put(&bbio->bio);
9181 }
9182
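/*
 * Read disk_io_size bytes starting at disk_bytenr into the given page array.
 * Without a uring context this blocks until all bios complete and returns 0
 * or a negative errno; with one it may return -EIOCBQUEUED and report the
 * result through btrfs_uring_read_extent_endio() instead.
 */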
9183 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
9184 u64 disk_bytenr, u64 disk_io_size,
9185 struct page **pages, void *uring_ctx)
9186 {
9187 struct btrfs_fs_info *fs_info = inode->root->fs_info;
9188 struct btrfs_encoded_read_private *priv;
9189 unsigned long i = 0;
9190 struct btrfs_bio *bbio;
9191 int ret;
9192
9193 priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
9194 if (!priv)
9195 return -ENOMEM;
9196
9197 init_completion(&priv->done);
9198 refcount_set(&priv->pending_refs, 1);
9199 priv->status = 0;
9200 priv->uring_ctx = uring_ctx;
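/*
 * pending_refs starts at 1 for the submitter; each submitted bbio takes an
 * extra reference, and whoever drops the last one reaps the result (either
 * the endio handler or the code further below).
 */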
9201
9202 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
9203 btrfs_encoded_read_endio, priv);
9204 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9205 bbio->inode = inode;
9206
9207 do {
9208 size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
9209
9210 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
9211 refcount_inc(&priv->pending_refs);
9212 btrfs_submit_bbio(bbio, 0);
9213
9214 bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
9215 btrfs_encoded_read_endio, priv);
9216 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
9217 bbio->inode = inode;
9218 continue;
9219 }
9220
9221 i++;
9222 disk_bytenr += bytes;
9223 disk_io_size -= bytes;
9224 } while (disk_io_size);
9225
9226 refcount_inc(&priv->pending_refs);
9227 btrfs_submit_bbio(bbio, 0);
9228
9229 if (uring_ctx) {
9230 if (refcount_dec_and_test(&priv->pending_refs)) {
9231 ret = blk_status_to_errno(READ_ONCE(priv->status));
9232 btrfs_uring_read_extent_endio(uring_ctx, ret);
9233 kfree(priv);
9234 return ret;
9235 }
9236
9237 return -EIOCBQUEUED;
9238 } else {
9239 if (!refcount_dec_and_test(&priv->pending_refs))
9240 wait_for_completion_io(&priv->done);
9241 /* See btrfs_encoded_read_endio() for ordering. */
9242 ret = blk_status_to_errno(READ_ONCE(priv->status));
9243 kfree(priv);
9244 return ret;
9245 }
9246 }
9247
9248 ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
9249 u64 start, u64 lockend,
9250 struct extent_state **cached_state,
9251 u64 disk_bytenr, u64 disk_io_size,
9252 size_t count, bool compressed, bool *unlocked)
9253 {
9254 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9255 struct extent_io_tree *io_tree = &inode->io_tree;
9256 struct page **pages;
9257 unsigned long nr_pages, i;
9258 u64 cur;
9259 size_t page_offset;
9260 ssize_t ret;
9261
9262 nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
9263 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
9264 if (!pages)
9265 return -ENOMEM;
9266 ret = btrfs_alloc_page_array(nr_pages, pages, false);
9267 if (ret) {
9268 ret = -ENOMEM;
9269 goto out;
9270 }
9271
9272 ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
9273 disk_io_size, pages, NULL);
9274 if (ret)
9275 goto out;
9276
9277 unlock_extent(io_tree, start, lockend, cached_state);
9278 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9279 *unlocked = true;
9280
9281 if (compressed) {
9282 i = 0;
9283 page_offset = 0;
9284 } else {
9285 i = (iocb->ki_pos - start) >> PAGE_SHIFT;
9286 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
9287 }
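/*
 * For a compressed extent the whole on-disk extent was read, so copy from
 * the start of the page array; otherwise skip to the page and offset that
 * hold iocb->ki_pos within the locked range.
 */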
9288 cur = 0;
9289 while (cur < count) {
9290 size_t bytes = min_t(size_t, count - cur,
9291 PAGE_SIZE - page_offset);
9292
9293 if (copy_page_to_iter(pages[i], page_offset, bytes,
9294 iter) != bytes) {
9295 ret = -EFAULT;
9296 goto out;
9297 }
9298 i++;
9299 cur += bytes;
9300 page_offset = 0;
9301 }
9302 ret = count;
9303 out:
9304 for (i = 0; i < nr_pages; i++) {
9305 if (pages[i])
9306 __free_page(pages[i]);
9307 }
9308 kfree(pages);
9309 return ret;
9310 }
9311
9312 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
9313 struct btrfs_ioctl_encoded_io_args *encoded,
9314 struct extent_state **cached_state,
9315 u64 *disk_bytenr, u64 *disk_io_size)
9316 {
9317 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9318 struct btrfs_fs_info *fs_info = inode->root->fs_info;
9319 struct extent_io_tree *io_tree = &inode->io_tree;
9320 ssize_t ret;
9321 size_t count = iov_iter_count(iter);
9322 u64 start, lockend;
9323 struct extent_map *em;
9324 const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
9325 bool unlocked = false;
9326
9327 file_accessed(iocb->ki_filp);
9328
9329 ret = btrfs_inode_lock(inode,
9330 BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
9331 if (ret)
9332 return ret;
9333
9334 if (iocb->ki_pos >= inode->vfs_inode.i_size) {
9335 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9336 return 0;
9337 }
9338 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
9339 /*
9340 * We don't know how long the extent containing iocb->ki_pos is, but if
9341 * it's compressed we know that it won't be longer than this.
9342 */
9343 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
9344
9345 if (nowait) {
9346 struct btrfs_ordered_extent *ordered;
9347
9348 if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
9349 start, lockend)) {
9350 ret = -EAGAIN;
9351 goto out_unlock_inode;
9352 }
9353
9354 if (!try_lock_extent(io_tree, start, lockend, cached_state)) {
9355 ret = -EAGAIN;
9356 goto out_unlock_inode;
9357 }
9358
9359 ordered = btrfs_lookup_ordered_range(inode, start,
9360 lockend - start + 1);
9361 if (ordered) {
9362 btrfs_put_ordered_extent(ordered);
9363 unlock_extent(io_tree, start, lockend, cached_state);
9364 ret = -EAGAIN;
9365 goto out_unlock_inode;
9366 }
9367 } else {
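/*
 * Wait for ordered extents and retry: a new ordered extent can show up
 * between the wait and taking the extent lock, so only proceed once the
 * range is locked with none present.
 */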
9368 for (;;) {
9369 struct btrfs_ordered_extent *ordered;
9370
9371 ret = btrfs_wait_ordered_range(inode, start,
9372 lockend - start + 1);
9373 if (ret)
9374 goto out_unlock_inode;
9375
9376 lock_extent(io_tree, start, lockend, cached_state);
9377 ordered = btrfs_lookup_ordered_range(inode, start,
9378 lockend - start + 1);
9379 if (!ordered)
9380 break;
9381 btrfs_put_ordered_extent(ordered);
9382 unlock_extent(io_tree, start, lockend, cached_state);
9383 cond_resched();
9384 }
9385 }
9386
9387 em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
9388 if (IS_ERR(em)) {
9389 ret = PTR_ERR(em);
9390 goto out_unlock_extent;
9391 }
9392
9393 if (em->disk_bytenr == EXTENT_MAP_INLINE) {
9394 u64 extent_start = em->start;
9395
9396 /*
9397 * For inline extents we get everything we need out of the
9398 * extent item.
9399 */
9400 free_extent_map(em);
9401 em = NULL;
9402 ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
9403 cached_state, extent_start,
9404 count, encoded, &unlocked);
9405 goto out_unlock_extent;
9406 }
9407
9408 /*
9409 * We only want to return up to EOF even if the extent extends beyond
9410 * that.
9411 */
9412 encoded->len = min_t(u64, extent_map_end(em),
9413 inode->vfs_inode.i_size) - iocb->ki_pos;
9414 if (em->disk_bytenr == EXTENT_MAP_HOLE ||
9415 (em->flags & EXTENT_FLAG_PREALLOC)) {
9416 *disk_bytenr = EXTENT_MAP_HOLE;
9417 count = min_t(u64, count, encoded->len);
9418 encoded->len = count;
9419 encoded->unencoded_len = count;
9420 } else if (extent_map_is_compressed(em)) {
9421 *disk_bytenr = em->disk_bytenr;
9422 /*
9423 * Bail if the buffer isn't large enough to return the whole
9424 * compressed extent.
9425 */
9426 if (em->disk_num_bytes > count) {
9427 ret = -ENOBUFS;
9428 goto out_em;
9429 }
9430 *disk_io_size = em->disk_num_bytes;
9431 count = em->disk_num_bytes;
9432 encoded->unencoded_len = em->ram_bytes;
9433 encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
9434 ret = btrfs_encoded_io_compression_from_extent(fs_info,
9435 extent_map_compression(em));
9436 if (ret < 0)
9437 goto out_em;
9438 encoded->compression = ret;
9439 } else {
9440 *disk_bytenr = extent_map_block_start(em) + (start - em->start);
9441 if (encoded->len > count)
9442 encoded->len = count;
9443 /*
9444 * Don't read beyond what we locked. This also limits the page
9445 * allocations that we'll do.
9446 */
9447 *disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
9448 count = start + *disk_io_size - iocb->ki_pos;
9449 encoded->len = count;
9450 encoded->unencoded_len = count;
9451 *disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
9452 }
9453 free_extent_map(em);
9454 em = NULL;
9455
9456 if (*disk_bytenr == EXTENT_MAP_HOLE) {
9457 unlock_extent(io_tree, start, lockend, cached_state);
9458 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9459 unlocked = true;
9460 ret = iov_iter_zero(count, iter);
9461 if (ret != count)
9462 ret = -EFAULT;
9463 } else {
9464 ret = -EIOCBQUEUED;
9465 goto out_unlock_extent;
9466 }
9467
9468 out_em:
9469 free_extent_map(em);
9470 out_unlock_extent:
9471 /* Leave inode and extent locked if we need to do a read. */
9472 if (!unlocked && ret != -EIOCBQUEUED)
9473 unlock_extent(io_tree, start, lockend, cached_state);
9474 out_unlock_inode:
9475 if (!unlocked && ret != -EIOCBQUEUED)
9476 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
9477 return ret;
9478 }
9479
9480 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
9481 const struct btrfs_ioctl_encoded_io_args *encoded)
9482 {
9483 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
9484 struct btrfs_root *root = inode->root;
9485 struct btrfs_fs_info *fs_info = root->fs_info;
9486 struct extent_io_tree *io_tree = &inode->io_tree;
9487 struct extent_changeset *data_reserved = NULL;
9488 struct extent_state *cached_state = NULL;
9489 struct btrfs_ordered_extent *ordered;
9490 struct btrfs_file_extent file_extent;
9491 int compression;
9492 size_t orig_count;
9493 u64 start, end;
9494 u64 num_bytes, ram_bytes, disk_num_bytes;
9495 unsigned long nr_folios, i;
9496 struct folio **folios;
9497 struct btrfs_key ins;
9498 bool extent_reserved = false;
9499 struct extent_map *em;
9500 ssize_t ret;
9501
9502 switch (encoded->compression) {
9503 case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
9504 compression = BTRFS_COMPRESS_ZLIB;
9505 break;
9506 case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
9507 compression = BTRFS_COMPRESS_ZSTD;
9508 break;
9509 case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
9510 case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
9511 case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
9512 case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
9513 case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
9514 /*
* The sector size must match for LZO: the check below requires, e.g.,
* BTRFS_ENCODED_IO_COMPRESSION_LZO_4K to pair with sectorsize_bits == 12
* (4K sectors).
*/
9515 if (encoded->compression -
9516 BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
9517 fs_info->sectorsize_bits)
9518 return -EINVAL;
9519 compression = BTRFS_COMPRESS_LZO;
9520 break;
9521 default:
9522 return -EINVAL;
9523 }
9524 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
9525 return -EINVAL;
9526
9527 /*
9528 * Compressed extents should always have checksums, so error out if we
9529 * have a NOCOW file or the inode was created while mounted with NODATASUM.
9530 */
9531 if (inode->flags & BTRFS_INODE_NODATASUM)
9532 return -EINVAL;
9533
9534 orig_count = iov_iter_count(from);
9535
9536 /* The extent size must be sane. */
9537 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
9538 orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
9539 return -EINVAL;
9540
9541 /*
9542 * The compressed data must be smaller than the decompressed data.
9543 *
9544 * It's of course possible for data to compress to larger or the same
9545 * size, but the buffered I/O path falls back to no compression for such
9546 * data, and we don't want to break any assumptions by creating these
9547 * extents.
9548 *
9549 * Note that this is less strict than the current check we have that the
9550 * compressed data must be at least one sector smaller than the
9551 * decompressed data. We only want to enforce the weaker requirement
9552 * from old kernels that it is at least one byte smaller.
9553 */
9554 if (orig_count >= encoded->unencoded_len)
9555 return -EINVAL;
9556
9557 /* The extent must start on a sector boundary. */
9558 start = iocb->ki_pos;
9559 if (!IS_ALIGNED(start, fs_info->sectorsize))
9560 return -EINVAL;
9561
9562 /*
9563 * The extent must end on a sector boundary. However, we allow a write
9564 * which ends at or extends i_size to have an unaligned length; we round
9565 * up the extent size and set i_size to the unaligned end.
9566 */
9567 if (start + encoded->len < inode->vfs_inode.i_size &&
9568 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
9569 return -EINVAL;
9570
9571 /* Finally, the offset in the unencoded data must be sector-aligned. */
9572 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
9573 return -EINVAL;
9574
9575 num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
9576 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
9577 end = start + num_bytes - 1;
9578
9579 /*
9580 * If the extent cannot be inline, the compressed data on disk must be
9581 * sector-aligned. For convenience, we extend it with zeroes if it
9582 * isn't.
9583 */
9584 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
9585 nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
9586 folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
9587 if (!folios)
9588 return -ENOMEM;
9589 for (i = 0; i < nr_folios; i++) {
9590 size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
9591 char *kaddr;
9592
9593 folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
9594 if (!folios[i]) {
9595 ret = -ENOMEM;
9596 goto out_folios;
9597 }
9598 kaddr = kmap_local_folio(folios[i], 0);
9599 if (copy_from_iter(kaddr, bytes, from) != bytes) {
9600 kunmap_local(kaddr);
9601 ret = -EFAULT;
9602 goto out_folios;
9603 }
9604 if (bytes < PAGE_SIZE)
9605 memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
9606 kunmap_local(kaddr);
9607 }
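/*
 * From here on the caller's data lives in the folios array, zero-padded up
 * to a page boundary and ready to be handed to
 * btrfs_submit_compressed_write().
 */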
9608
9609 for (;;) {
9610 struct btrfs_ordered_extent *ordered;
9611
9612 ret = btrfs_wait_ordered_range(inode, start, num_bytes);
9613 if (ret)
9614 goto out_folios;
9615 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
9616 start >> PAGE_SHIFT,
9617 end >> PAGE_SHIFT);
9618 if (ret)
9619 goto out_folios;
9620 lock_extent(io_tree, start, end, &cached_state);
9621 ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
9622 if (!ordered &&
9623 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
9624 break;
9625 if (ordered)
9626 btrfs_put_ordered_extent(ordered);
9627 unlock_extent(io_tree, start, end, &cached_state);
9628 cond_resched();
9629 }
9630
9631 /*
9632 * We don't use the higher-level delalloc space functions because our
9633 * num_bytes and disk_num_bytes are different.
9634 */
9635 ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
9636 if (ret)
9637 goto out_unlock;
9638 ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
9639 if (ret)
9640 goto out_free_data_space;
9641 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
9642 false);
9643 if (ret)
9644 goto out_qgroup_free_data;
9645
9646 /* Try an inline extent first. */
9647 if (encoded->unencoded_len == encoded->len &&
9648 encoded->unencoded_offset == 0 &&
9649 can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
9650 ret = __cow_file_range_inline(inode, encoded->len,
9651 orig_count, compression, folios[0],
9652 true);
9653 if (ret <= 0) {
9654 if (ret == 0)
9655 ret = orig_count;
9656 goto out_delalloc_release;
9657 }
9658 }
9659
9660 ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
9661 disk_num_bytes, 0, 0, &ins, 1, 1);
9662 if (ret)
9663 goto out_delalloc_release;
9664 extent_reserved = true;
9665
9666 file_extent.disk_bytenr = ins.objectid;
9667 file_extent.disk_num_bytes = ins.offset;
9668 file_extent.num_bytes = num_bytes;
9669 file_extent.ram_bytes = ram_bytes;
9670 file_extent.offset = encoded->unencoded_offset;
9671 file_extent.compression = compression;
9672 em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
9673 if (IS_ERR(em)) {
9674 ret = PTR_ERR(em);
9675 goto out_free_reserved;
9676 }
9677 free_extent_map(em);
9678
9679 ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
9680 (1 << BTRFS_ORDERED_ENCODED) |
9681 (1 << BTRFS_ORDERED_COMPRESSED));
9682 if (IS_ERR(ordered)) {
9683 btrfs_drop_extent_map_range(inode, start, end, false);
9684 ret = PTR_ERR(ordered);
9685 goto out_free_reserved;
9686 }
9687 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9688
9689 if (start + encoded->len > inode->vfs_inode.i_size)
9690 i_size_write(&inode->vfs_inode, start + encoded->len);
9691
9692 unlock_extent(io_tree, start, end, &cached_state);
9693
9694 btrfs_delalloc_release_extents(inode, num_bytes);
9695
9696 btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
9697 ret = orig_count;
9698 goto out;
9699
9700 out_free_reserved:
9701 btrfs_dec_block_group_reservations(fs_info, ins.objectid);
9702 btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
9703 out_delalloc_release:
9704 btrfs_delalloc_release_extents(inode, num_bytes);
9705 btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
9706 out_qgroup_free_data:
9707 if (ret < 0)
9708 btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
9709 out_free_data_space:
9710 /*
9711 * If btrfs_reserve_extent() succeeded, then we already decremented
9712 * bytes_may_use.
9713 */
9714 if (!extent_reserved)
9715 btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
9716 out_unlock:
9717 unlock_extent(io_tree, start, end, &cached_state);
9718 out_folios:
9719 for (i = 0; i < nr_folios; i++) {
9720 if (folios[i])
9721 folio_put(folios[i]);
9722 }
9723 kvfree(folios);
9724 out:
9725 if (ret >= 0)
9726 iocb->ki_pos += encoded->len;
9727 return ret;
9728 }
9729
9730 #ifdef CONFIG_SWAP
9731 /*
9732 * Add an entry indicating a block group or device which is pinned by a
9733 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
9734 * negative errno on failure.
9735 */
9736 static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
9737 bool is_block_group)
9738 {
9739 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
9740 struct btrfs_swapfile_pin *sp, *entry;
9741 struct rb_node **p;
9742 struct rb_node *parent = NULL;
9743
9744 sp = kmalloc(sizeof(*sp), GFP_NOFS);
9745 if (!sp)
9746 return -ENOMEM;
9747 sp->ptr = ptr;
9748 sp->inode = inode;
9749 sp->is_block_group = is_block_group;
9750 sp->bg_extent_count = 1;
9751
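/*
 * The rbtree is ordered by (ptr, inode); if an entry for this block group
 * already exists, just bump its extent count instead of inserting a
 * duplicate.
 */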
9752 spin_lock(&fs_info->swapfile_pins_lock);
9753 p = &fs_info->swapfile_pins.rb_node;
9754 while (*p) {
9755 parent = *p;
9756 entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
9757 if (sp->ptr < entry->ptr ||
9758 (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
9759 p = &(*p)->rb_left;
9760 } else if (sp->ptr > entry->ptr ||
9761 (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
9762 p = &(*p)->rb_right;
9763 } else {
9764 if (is_block_group)
9765 entry->bg_extent_count++;
9766 spin_unlock(&fs_info->swapfile_pins_lock);
9767 kfree(sp);
9768 return 1;
9769 }
9770 }
9771 rb_link_node(&sp->node, parent, p);
9772 rb_insert_color(&sp->node, &fs_info->swapfile_pins);
9773 spin_unlock(&fs_info->swapfile_pins_lock);
9774 return 0;
9775 }
9776
9777 /* Free all of the entries pinned by this swapfile. */
9778 static void btrfs_free_swapfile_pins(struct inode *inode)
9779 {
9780 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
9781 struct btrfs_swapfile_pin *sp;
9782 struct rb_node *node, *next;
9783
9784 spin_lock(&fs_info->swapfile_pins_lock);
9785 node = rb_first(&fs_info->swapfile_pins);
9786 while (node) {
9787 next = rb_next(node);
9788 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
9789 if (sp->inode == inode) {
9790 rb_erase(&sp->node, &fs_info->swapfile_pins);
9791 if (sp->is_block_group) {
9792 btrfs_dec_block_group_swap_extents(sp->ptr,
9793 sp->bg_extent_count);
9794 btrfs_put_block_group(sp->ptr);
9795 }
9796 kfree(sp);
9797 }
9798 node = next;
9799 }
9800 spin_unlock(&fs_info->swapfile_pins_lock);
9801 }
9802
9803 struct btrfs_swap_info {
9804 u64 start;
9805 u64 block_start;
9806 u64 block_len;
9807 u64 lowest_ppage;
9808 u64 highest_ppage;
9809 unsigned long nr_pages;
9810 int nr_extents;
9811 };
9812
9813 static int btrfs_add_swap_extent(struct swap_info_struct *sis,
9814 struct btrfs_swap_info *bsi)
9815 {
9816 unsigned long nr_pages;
9817 unsigned long max_pages;
9818 u64 first_ppage, first_ppage_reported, next_ppage;
9819 int ret;
9820
9821 /*
9822 * Our swapfile may have had its size extended after the swap header was
9823 * written. In that case activating the swapfile should not go beyond
9824 * the max size set in the swap header.
9825 */
9826 if (bsi->nr_pages >= sis->max)
9827 return 0;
9828
9829 max_pages = sis->max - bsi->nr_pages;
9830 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
9831 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;
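/*
 * Round the physical range inwards to whole pages. For example, with 4K
 * pages, block_start == 6K and block_len == 10K cover only physical pages 2
 * and 3: first_ppage == 2 and next_ppage == 4.
 */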
9832
9833 if (first_ppage >= next_ppage)
9834 return 0;
9835 nr_pages = next_ppage - first_ppage;
9836 nr_pages = min(nr_pages, max_pages);
9837
9838 first_ppage_reported = first_ppage;
9839 if (bsi->start == 0)
9840 first_ppage_reported++;
9841 if (bsi->lowest_ppage > first_ppage_reported)
9842 bsi->lowest_ppage = first_ppage_reported;
9843 if (bsi->highest_ppage < (next_ppage - 1))
9844 bsi->highest_ppage = next_ppage - 1;
9845
9846 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
9847 if (ret < 0)
9848 return ret;
9849 bsi->nr_extents += ret;
9850 bsi->nr_pages += nr_pages;
9851 return 0;
9852 }
9853
9854 static void btrfs_swap_deactivate(struct file *file)
9855 {
9856 struct inode *inode = file_inode(file);
9857
9858 btrfs_free_swapfile_pins(inode);
9859 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
9860 }
9861
9862 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
9863 sector_t *span)
9864 {
9865 struct inode *inode = file_inode(file);
9866 struct btrfs_root *root = BTRFS_I(inode)->root;
9867 struct btrfs_fs_info *fs_info = root->fs_info;
9868 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
9869 struct extent_state *cached_state = NULL;
9870 struct btrfs_chunk_map *map = NULL;
9871 struct btrfs_device *device = NULL;
9872 struct btrfs_swap_info bsi = {
9873 .lowest_ppage = (sector_t)-1ULL,
9874 };
9875 struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
9876 struct btrfs_path *path = NULL;
9877 int ret = 0;
9878 u64 isize;
9879 u64 prev_extent_end = 0;
9880
9881 /*
9882 * Acquire the inode's mmap lock to prevent races with memory mapped
9883 * writes, as they could happen after we flush delalloc below and before
9884 * we lock the extent range further below. The inode was already locked
9885 * higher up in the call chain.
9886 */
9887 btrfs_assert_inode_locked(BTRFS_I(inode));
9888 down_write(&BTRFS_I(inode)->i_mmap_lock);
9889
9890 /*
9891 * If the swap file was just created, make sure delalloc is done. If the
9892 * file changes again after this, the user is doing something stupid and
9893 * we don't really care.
9894 */
9895 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
9896 if (ret)
9897 goto out_unlock_mmap;
9898
9899 /*
9900 * The inode is locked, so these flags won't change after we check them.
9901 */
9902 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
9903 btrfs_warn(fs_info, "swapfile must not be compressed");
9904 ret = -EINVAL;
9905 goto out_unlock_mmap;
9906 }
9907 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
9908 btrfs_warn(fs_info, "swapfile must not be copy-on-write");
9909 ret = -EINVAL;
9910 goto out_unlock_mmap;
9911 }
9912 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
9913 btrfs_warn(fs_info, "swapfile must not be checksummed");
9914 ret = -EINVAL;
9915 goto out_unlock_mmap;
9916 }
9917
9918 path = btrfs_alloc_path();
9919 backref_ctx = btrfs_alloc_backref_share_check_ctx();
9920 if (!path || !backref_ctx) {
9921 ret = -ENOMEM;
9922 goto out_unlock_mmap;
9923 }
9924
9925 /*
9926 * Balance or device remove/replace/resize can move stuff around from
9927 * under us. The exclop protection makes sure they aren't running/won't
9928 * run concurrently while we are mapping the swap extents, and
9929 * fs_info->swapfile_pins prevents them from running while the swap
9930 * file is active and moving the extents. Note that this also prevents
9931 * a concurrent device add which isn't actually necessary, but it's not
9932 * really worth the trouble to allow it.
9933 */
9934 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
9935 btrfs_warn(fs_info,
9936 "cannot activate swapfile while exclusive operation is running");
9937 ret = -EBUSY;
9938 goto out_unlock_mmap;
9939 }
9940
9941 /*
9942 * Prevent snapshot creation while we are activating the swap file.
9943 * We do not want to race with snapshot creation. If snapshot creation
9944 * already started before we bumped nr_swapfiles from 0 to 1 and
9945 * completes before the first write into the swap file after it is
9946 * activated, then that write would fall back to COW.
9947 */
9948 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
9949 btrfs_exclop_finish(fs_info);
9950 btrfs_warn(fs_info,
9951 "cannot activate swapfile because snapshot creation is in progress");
9952 ret = -EINVAL;
9953 goto out_unlock_mmap;
9954 }
9955 /*
9956 * Snapshots can create extents which require COW even if NODATACOW is
9957 * set. We use this counter to prevent snapshots. We must increment it
9958 * before walking the extents because we don't want a concurrent
9959 * snapshot to run after we've already checked the extents.
9960 *
9961 * It is possible that the subvolume is marked for deletion but not yet
9962 * removed. To prevent this race, we check the root status before
9963 * activating the swapfile.
9964 */
9965 spin_lock(&root->root_item_lock);
9966 if (btrfs_root_dead(root)) {
9967 spin_unlock(&root->root_item_lock);
9968
9969 btrfs_drew_write_unlock(&root->snapshot_lock);
9970 btrfs_exclop_finish(fs_info);
9971 btrfs_warn(fs_info,
9972 "cannot activate swapfile because subvolume %llu is being deleted",
9973 btrfs_root_id(root));
9974 ret = -EPERM;
9975 goto out_unlock_mmap;
9976 }
9977 atomic_inc(&root->nr_swapfiles);
9978 spin_unlock(&root->root_item_lock);
9979
9980 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
9981
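/*
 * Walk all file extents up to isize and verify each one is usable for swap:
 * not inline, not compressed, not a hole, not shared (would require COW), on
 * a single device and in a writable block group, pinning the device and each
 * block group along the way.
 */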
9982 lock_extent(io_tree, 0, isize - 1, &cached_state);
9983 while (prev_extent_end < isize) {
9984 struct btrfs_key key;
9985 struct extent_buffer *leaf;
9986 struct btrfs_file_extent_item *ei;
9987 struct btrfs_block_group *bg;
9988 u64 logical_block_start;
9989 u64 physical_block_start;
9990 u64 extent_gen;
9991 u64 disk_bytenr;
9992 u64 len;
9993
9994 key.objectid = btrfs_ino(BTRFS_I(inode));
9995 key.type = BTRFS_EXTENT_DATA_KEY;
9996 key.offset = prev_extent_end;
9997
9998 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9999 if (ret < 0)
10000 goto out;
10001
10002 /*
10003 * If the key is not found it means we have an implicit hole (NO_HOLES
10004 * is enabled).
10005 */
10006 if (ret > 0) {
10007 btrfs_warn(fs_info, "swapfile must not have holes");
10008 ret = -EINVAL;
10009 goto out;
10010 }
10011
10012 leaf = path->nodes[0];
10013 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
10014
10015 if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
10016 /*
10017 * It's unlikely we'll ever actually find ourselves
10018 * here, as a file small enough to fit inline won't be
10019 * big enough to store more than the swap header, but in
10020 * case something changes in the future, let's catch it
10021 * here rather than later.
10022 */
10023 btrfs_warn(fs_info, "swapfile must not be inline");
10024 ret = -EINVAL;
10025 goto out;
10026 }
10027
10028 if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
10029 btrfs_warn(fs_info, "swapfile must not be compressed");
10030 ret = -EINVAL;
10031 goto out;
10032 }
10033
10034 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
10035 if (disk_bytenr == 0) {
10036 btrfs_warn(fs_info, "swapfile must not have holes");
10037 ret = -EINVAL;
10038 goto out;
10039 }
10040
10041 logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
10042 extent_gen = btrfs_file_extent_generation(leaf, ei);
10043 prev_extent_end = btrfs_file_extent_end(path);
10044
10045 if (prev_extent_end > isize)
10046 len = isize - key.offset;
10047 else
10048 len = btrfs_file_extent_num_bytes(leaf, ei);
10049
10050 backref_ctx->curr_leaf_bytenr = leaf->start;
10051
10052 /*
10053 * We don't need the path anymore, so release it to avoid deadlocks
10054 * when calling btrfs_is_data_extent_shared(): joining a transaction
10055 * there can block waiting for the current transaction's commit,
10056 * which in turn may be trying to lock the same leaf to flush
10057 * delayed items, for example.
10058 */
10059 btrfs_release_path(path);
10060
10061 ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
10062 extent_gen, backref_ctx);
10063 if (ret < 0) {
10064 goto out;
10065 } else if (ret > 0) {
10066 btrfs_warn(fs_info,
10067 "swapfile must not be copy-on-write");
10068 ret = -EINVAL;
10069 goto out;
10070 }
10071
10072 map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
10073 if (IS_ERR(map)) {
10074 ret = PTR_ERR(map);
10075 goto out;
10076 }
10077
10078 if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
10079 btrfs_warn(fs_info,
10080 "swapfile must have single data profile");
10081 ret = -EINVAL;
10082 goto out;
10083 }
10084
10085 if (device == NULL) {
10086 device = map->stripes[0].dev;
10087 ret = btrfs_add_swapfile_pin(inode, device, false);
10088 if (ret == 1)
10089 ret = 0;
10090 else if (ret)
10091 goto out;
10092 } else if (device != map->stripes[0].dev) {
10093 btrfs_warn(fs_info, "swapfile must be on one device");
10094 ret = -EINVAL;
10095 goto out;
10096 }
10097
10098 physical_block_start = (map->stripes[0].physical +
10099 (logical_block_start - map->start));
10100 btrfs_free_chunk_map(map);
10101 map = NULL;
10102
10103 bg = btrfs_lookup_block_group(fs_info, logical_block_start);
10104 if (!bg) {
10105 btrfs_warn(fs_info,
10106 "could not find block group containing swapfile");
10107 ret = -EINVAL;
10108 goto out;
10109 }
10110
10111 if (!btrfs_inc_block_group_swap_extents(bg)) {
10112 btrfs_warn(fs_info,
10113 "block group for swapfile at %llu is read-only%s",
10114 bg->start,
10115 atomic_read(&fs_info->scrubs_running) ?
10116 " (scrub running)" : "");
10117 btrfs_put_block_group(bg);
10118 ret = -EINVAL;
10119 goto out;
10120 }
10121
10122 ret = btrfs_add_swapfile_pin(inode, bg, true);
10123 if (ret) {
10124 btrfs_put_block_group(bg);
10125 if (ret == 1)
10126 ret = 0;
10127 else
10128 goto out;
10129 }
10130
10131 if (bsi.block_len &&
10132 bsi.block_start + bsi.block_len == physical_block_start) {
10133 bsi.block_len += len;
10134 } else {
10135 if (bsi.block_len) {
10136 ret = btrfs_add_swap_extent(sis, &bsi);
10137 if (ret)
10138 goto out;
10139 }
10140 bsi.start = key.offset;
10141 bsi.block_start = physical_block_start;
10142 bsi.block_len = len;
10143 }
10144
10145 if (fatal_signal_pending(current)) {
10146 ret = -EINTR;
10147 goto out;
10148 }
10149
10150 cond_resched();
10151 }
10152
10153 if (bsi.block_len)
10154 ret = btrfs_add_swap_extent(sis, &bsi);
10155
10156 out:
10157 if (!IS_ERR_OR_NULL(map))
10158 btrfs_free_chunk_map(map);
10159
10160 unlock_extent(io_tree, 0, isize - 1, &cached_state);
10161
10162 if (ret)
10163 btrfs_swap_deactivate(file);
10164
10165 btrfs_drew_write_unlock(&root->snapshot_lock);
10166
10167 btrfs_exclop_finish(fs_info);
10168
10169 out_unlock_mmap:
10170 up_write(&BTRFS_I(inode)->i_mmap_lock);
10171 btrfs_free_backref_share_ctx(backref_ctx);
10172 btrfs_free_path(path);
10173 if (ret)
10174 return ret;
10175
10176 if (device)
10177 sis->bdev = device->bdev;
10178 *span = bsi.highest_ppage - bsi.lowest_ppage + 1;
10179 sis->max = bsi.nr_pages;
10180 sis->pages = bsi.nr_pages - 1;
10181 return bsi.nr_extents;
10182 }
10183 #else
10184 static void btrfs_swap_deactivate(struct file *file)
10185 {
10186 }
10187
10188 static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
10189 sector_t *span)
10190 {
10191 return -EOPNOTSUPP;
10192 }
10193 #endif
10194
10195 /*
10196 * Update the number of bytes used in the VFS' inode. When we replace extents in
10197 * a range (clone, dedupe, fallocate's zero range), we must update the number of
10198 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
10199 * always get a correct value.
10200 */
10201 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
10202 const u64 add_bytes,
10203 const u64 del_bytes)
10204 {
10205 if (add_bytes == del_bytes)
10206 return;
10207
10208 spin_lock(&inode->lock);
10209 if (del_bytes > 0)
10210 inode_sub_bytes(&inode->vfs_inode, del_bytes);
10211 if (add_bytes > 0)
10212 inode_add_bytes(&inode->vfs_inode, add_bytes);
10213 spin_unlock(&inode->lock);
10214 }

/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode: The target inode.
 * @start: Start offset of the file range, should be sector size aligned.
 * @end:   End offset (inclusive) of the file range, its value plus 1 should
 *         be sector size aligned.
 *
 * This should typically be used in cases where we have taken the inode's VFS
 * lock in exclusive mode, taken the inode's i_mmap_lock in exclusive mode,
 * flushed all delalloc in the range, waited for all ordered extents in the
 * range to complete, and finally locked the file range in the inode's
 * io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
			  "found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), btrfs_root_id(root),
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}
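
/*
 * Sketch of the expected call sequence (abbreviated; exact helper usage
 * varies by call site):
 *
 *	btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
 *	... flush delalloc and wait for ordered extents in [start, end] ...
 *	lock_extent(&inode->io_tree, start, end, &cached_state);
 *	btrfs_assert_inode_range_clean(inode, start, end);
 */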

/*
 * Find the first inode with a minimum number.
 *
 * @root:    The root to search in.
 * @min_ino: The minimum inode number.
 *
 * Find the first inode in the @root with a number >= @min_ino and return it.
 * Returns NULL if no such inode is found.
 */
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
{
	struct btrfs_inode *inode;
	unsigned long from = min_ino;

	xa_lock(&root->inodes);
	while (true) {
		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
		if (!inode)
			break;
		if (igrab(&inode->vfs_inode))
			break;

		from = btrfs_ino(inode) + 1;
		cond_resched_lock(&root->inodes.xa_lock);
	}
	xa_unlock(&root->inodes);

	return inode;
}
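
/*
 * Typical iteration pattern (a sketch, not from this file): walk every
 * inode of a root in ascending inode number order. The caller owns the
 * reference returned for each inode and must drop it with iput().
 *
 *	struct btrfs_inode *inode = btrfs_find_first_inode(root, 0);
 *
 *	while (inode) {
 *		u64 ino = btrfs_ino(inode);
 *
 *		... use inode ...
 *
 *		iput(&inode->vfs_inode);
 *		inode = btrfs_find_first_inode(root, ino + 1);
 *	}
 */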

static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr = btrfs_getattr,
	.lookup = btrfs_lookup,
	.create = btrfs_create,
	.unlink = btrfs_unlink,
	.link = btrfs_link,
	.mkdir = btrfs_mkdir,
	.rmdir = btrfs_rmdir,
	.rename = btrfs_rename2,
	.symlink = btrfs_symlink,
	.setattr = btrfs_setattr,
	.mknod = btrfs_mknod,
	.listxattr = btrfs_listxattr,
	.permission = btrfs_permission,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
	.tmpfile = btrfs_tmpfile,
	.fileattr_get = btrfs_fileattr_get,
	.fileattr_set = btrfs_fileattr_set,
};

static const struct file_operations btrfs_dir_file_operations = {
	.llseek = btrfs_dir_llseek,
	.read = generic_read_dir,
	.iterate_shared = btrfs_real_readdir,
	.open = btrfs_opendir,
	.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = btrfs_compat_ioctl,
#endif
	.release = btrfs_release_file,
	.fsync = btrfs_sync_file,
};

/*
 * Btrfs doesn't support the bmap operation because swapfiles use bmap to
 * make a mapping of extents in the file. They assume these extents won't
 * change over the life of the file and they use the bmap result to do IO
 * directly to the block device.
 *
 * A btrfs bmap call would return logical addresses that aren't suitable
 * for IO, and those addresses also change frequently as COW operations
 * happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap. Swapfiles are instead
 * supported via the swap_activate/swap_deactivate address space operations
 * below, which validate and pin the file's extents at swapon time.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio = btrfs_read_folio,
	.writepages = btrfs_writepages,
	.readahead = btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.launder_folio = btrfs_launder_folio,
	.release_folio = btrfs_release_folio,
	.migrate_folio = btrfs_migrate_folio,
	.dirty_folio = filemap_dirty_folio,
	.error_remove_folio = generic_error_remove_folio,
	.swap_activate = btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};

static const struct inode_operations btrfs_file_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.listxattr = btrfs_listxattr,
	.permission = btrfs_permission,
	.fiemap = btrfs_fiemap,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
	.fileattr_get = btrfs_fileattr_get,
	.fileattr_set = btrfs_fileattr_set,
};

static const struct inode_operations btrfs_special_inode_operations = {
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.listxattr = btrfs_listxattr,
	.get_inode_acl = btrfs_get_acl,
	.set_acl = btrfs_set_acl,
	.update_time = btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link = page_get_link,
	.getattr = btrfs_getattr,
	.setattr = btrfs_setattr,
	.permission = btrfs_permission,
	.listxattr = btrfs_listxattr,
	.update_time = btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
	.d_delete = btrfs_dentry_delete,
};