/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_INODE_H
#define BTRFS_INODE_H

#include <linux/hash.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/fscrypt.h>
#include <linux/lockdep.h>
#include <uapi/linux/btrfs_tree.h>
#include <trace/events/btrfs.h>
#include "block-rsv.h"
#include "extent_map.h"
#include "extent_io.h"
#include "extent-io-tree.h"
#include "ordered-data.h"
#include "delayed-inode.h"

struct extent_state;
struct posix_acl;
struct iov_iter;
struct writeback_control;
struct btrfs_root;
struct btrfs_fs_info;
struct btrfs_trans_handle;

/*
 * Since we search a directory based on f_pos (struct dir_context::pos) we have
 * to start at 2 since '.' and '..' have f_pos of 0 and 1 respectively, so
 * everybody else has to start at 2 (see btrfs_real_readdir() and dir_emit_dots()).
 */
#define BTRFS_DIR_START_INDEX 2

/*
 * BTRFS_INODE_FLUSH_ON_CLOSE is set by truncate when a file that used to have
 * good data has been truncated to zero. When it is set the btrfs file release
 * call will add this inode to the ordered operations list so that we make sure
 * to flush out any new data the application may have written before commit.
 */
enum {
	BTRFS_INODE_FLUSH_ON_CLOSE,
	BTRFS_INODE_DUMMY,
	BTRFS_INODE_IN_DEFRAG,
	BTRFS_INODE_HAS_ASYNC_EXTENT,
	/*
	 * Always set under the VFS' inode lock, otherwise it can cause races
	 * during fsync (we start as a fast fsync and then end up in a full
	 * fsync racing with ordered extent completion).
	 */
	BTRFS_INODE_NEEDS_FULL_SYNC,
	BTRFS_INODE_COPY_EVERYTHING,
	BTRFS_INODE_HAS_PROPS,
	BTRFS_INODE_SNAPSHOT_FLUSH,
	/*
	 * Set and used when logging an inode and it serves to signal that an
	 * inode does not have xattrs, so subsequent fsyncs can avoid searching
	 * for xattrs to log. This bit must be cleared whenever a xattr is added
	 * to an inode.
	 */
	BTRFS_INODE_NO_XATTRS,
	/*
	 * Set when we are in a context where we need to start a transaction and
	 * have dirty pages with the respective file range locked. This is to
	 * ensure that when reserving space for the transaction, if we are low
	 * on available space and need to flush delalloc, we will not flush
	 * delalloc for this inode, because that could result in a deadlock (on
	 * the file range, inode's io_tree).
	 */
	BTRFS_INODE_NO_DELALLOC_FLUSH,
	/*
	 * Set when we are working on enabling verity for a file. Computing and
	 * writing the whole Merkle tree can take a while so we want to prevent
	 * races where two separate tasks attempt to simultaneously start verity
	 * on the same file.
	 */
	BTRFS_INODE_VERITY_IN_PROGRESS,
	/* Set when this inode is a free space inode. */
	BTRFS_INODE_FREE_SPACE_INODE,
	/* Set when there are no capabilities in xattrs for the inode. */
	BTRFS_INODE_NO_CAP_XATTR,
	/*
	 * Set if an error happened when doing a COW write before submitting a
	 * bio or during writeback. Used for both buffered writes and direct IO
	 * writes. This is to signal a fast fsync that it has to wait for
	 * ordered extents to complete and therefore not log extent maps that
	 * point to unwritten extents (when an ordered extent completes and it
	 * has the BTRFS_ORDERED_IOERR flag set, it drops extent maps in its
	 * range).
	 */
	BTRFS_INODE_COW_WRITE_ERROR,
};
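
/*
 * Note: the values above are bit numbers, not masks. They are used with the
 * atomic bitops on btrfs_inode::runtime_flags, as the helpers further below
 * (e.g. btrfs_is_free_space_inode()) do. Illustrative sketch only, not taken
 * from a real caller:
 *
 *	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 *	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 *		clear_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 */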

/* In-memory btrfs inode */
struct btrfs_inode {
	/* Which subvolume this inode belongs to */
	struct btrfs_root *root;

	/*
	 * Key used to find this inode on disk. This is used by the code to
	 * read in roots of subvolumes.
	 */
	struct btrfs_key location;

	/* Cached value of inode property 'compression'. */
	u8 prop_compress;

	/*
	 * Force compression on the file using the defrag ioctl, could be
	 * different from prop_compress and takes precedence if set.
	 */
	u8 defrag_compress;

	/*
	 * Lock for counters and all fields used to determine if the inode is in
	 * the log or not (last_trans, last_sub_trans, last_log_commit,
	 * logged_trans), to access/update delalloc_bytes, new_delalloc_bytes,
	 * defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to
	 * update the VFS' inode number of bytes used.
	 */
	spinlock_t lock;

	/* The extent_tree has caches of all the extent mappings to disk. */
	struct extent_map_tree extent_tree;

	/* The io_tree does range state (DIRTY, LOCKED etc). */
	struct extent_io_tree io_tree;

	/*
	 * Keep track of where the inode has extent items mapped in order to
	 * make sure the i_size adjustments are accurate. Not required when the
	 * filesystem is NO_HOLES, the status can't be set while mounted as
	 * it's a mkfs-time feature.
	 */
	struct extent_io_tree *file_extent_tree;

	/* Held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/*
	 * Counters to keep track of the number of extent items we may use due
	 * to delalloc and such. outstanding_extents is the number of extent
	 * items we think we'll end up using, and reserved_extents is the number
	 * of extent items we've reserved metadata for. Protected by 'lock'.
	 */
	unsigned outstanding_extents;

	/* Used to order data wrt metadata. */
	spinlock_t ordered_tree_lock;
	struct rb_root ordered_tree;
	struct rb_node *ordered_tree_last;

	/*
	 * List of all the delalloc inodes in the FS. There are times we need
	 * to write all the delalloc pages to disk, and this list is used to
	 * walk them all.
	 */
	struct list_head delalloc_inodes;

	/* Node for the red-black tree that links inodes in subvolume root. */
	struct rb_node rb_node;

	unsigned long runtime_flags;

	/*
	 * Full 64 bit generation number, struct vfs_inode doesn't have a big
	 * enough field for this.
	 */
	u64 generation;

	/*
	 * ID of the transaction handle that last modified this inode.
	 * Protected by 'lock'.
	 */
	u64 last_trans;

	/*
	 * ID of the transaction that last logged this inode.
	 * Protected by 'lock'.
	 */
	u64 logged_trans;

	/*
	 * Log transaction ID when this inode was last modified.
	 * Protected by 'lock'.
	 */
	int last_sub_trans;

	/* A local copy of root's last_log_commit. Protected by 'lock'. */
	int last_log_commit;

	union {
		/*
		 * Total number of bytes pending delalloc, used by stat to
		 * calculate the real block usage of the file. This is used
		 * only for files. Protected by 'lock'.
		 */
		u64 delalloc_bytes;
		/*
		 * The lowest possible index of the next dir index key which
		 * points to an inode that needs to be logged.
		 * This is used only for directories.
		 * Use the helpers btrfs_get_first_dir_index_to_log() and
		 * btrfs_set_first_dir_index_to_log() to access this field.
		 */
		u64 first_dir_index_to_log;
	};

	union {
		/*
		 * Total number of bytes pending delalloc that fall within a file
		 * range that is either a hole or beyond EOF (and no prealloc extent
		 * exists in the range). This is always <= delalloc_bytes and this
		 * is used only for files. Protected by 'lock'.
		 */
		u64 new_delalloc_bytes;
		/*
		 * The offset of the last dir index key that was logged.
		 * This is used only for directories.
		 */
		u64 last_dir_index_offset;
	};

	/*
	 * Total number of bytes pending defrag, used by stat to check whether
	 * it needs COW. Protected by 'lock'.
	 */
	u64 defrag_bytes;

	/*
	 * The size of the file stored in the metadata on disk. data=ordered
	 * means the in-memory i_size might be larger than the size on disk
	 * because not all the blocks are written yet. Protected by 'lock'.
	 */
	u64 disk_i_size;

	/*
	 * If this is a directory then index_cnt is the counter for the index
	 * number for new files that are created. For an empty directory, this
	 * must be initialized to BTRFS_DIR_START_INDEX.
	 */
	u64 index_cnt;

	/* Cache the directory index number to speed the dir/file remove. */
	u64 dir_index;

	/*
	 * The fsync log has some corner cases that mean we have to check
	 * directories to see if any unlinks have been done before the
	 * directory was logged. See tree-log.c for all the details.
	 */
	u64 last_unlink_trans;

	/*
	 * The id/generation of the last transaction where this inode was
	 * either the source or the destination of a clone/dedupe operation.
	 * Used when logging an inode to know if there are shared extents that
	 * need special care when logging checksum items, to avoid duplicate
	 * checksum items in a log (which can lead to a corruption where we end
	 * up with missing checksum ranges after log replay).
	 * Protected by the VFS inode lock.
	 */
	u64 last_reflink_trans;

	/*
	 * Number of bytes outstanding that are going to need csums. This is
	 * used in ENOSPC accounting. Protected by 'lock'.
	 */
	u64 csum_bytes;

	/* Backwards incompatible flags, lower half of inode_item::flags */
	u32 flags;
	/* Read-only compatibility flags, upper half of inode_item::flags */
	u32 ro_flags;

	struct btrfs_block_rsv block_rsv;

	struct btrfs_delayed_node *delayed_node;

	/* File creation time. */
	u64 i_otime_sec;
	u32 i_otime_nsec;

	/* Hook into fs_info->delayed_iputs */
	struct list_head delayed_iput;

	struct rw_semaphore i_mmap_lock;
	struct inode vfs_inode;
};
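
/*
 * The VFS inode is embedded above as 'vfs_inode', so converting between the
 * two views is pure pointer arithmetic. Illustrative round trip only, using
 * BTRFS_I() defined below:
 *
 *	struct btrfs_inode *bi = BTRFS_I(inode);	(VFS inode -> btrfs inode)
 *	struct inode *vfs_ino = &bi->vfs_inode;		(btrfs inode -> VFS inode)
 */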

static inline u64 btrfs_get_first_dir_index_to_log(const struct btrfs_inode *inode)
{
	return READ_ONCE(inode->first_dir_index_to_log);
}

static inline void btrfs_set_first_dir_index_to_log(struct btrfs_inode *inode,
						    u64 index)
{
	WRITE_ONCE(inode->first_dir_index_to_log, index);
}

static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
	return container_of(inode, struct btrfs_inode, vfs_inode);
}

static inline unsigned long btrfs_inode_hash(u64 objectid,
					     const struct btrfs_root *root)
{
	u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);

#if BITS_PER_LONG == 32
	h = (h >> 32) ^ (h & 0xffffffff);
#endif

	return (unsigned long)h;
}

#if BITS_PER_LONG == 32

/*
 * On 32 bit systems the i_ino of struct inode is 32 bits (unsigned long), so
 * we use the inode's location objectid which is a u64 to avoid truncation.
 */
static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	u64 ino = inode->location.objectid;

	/* type == BTRFS_ROOT_ITEM_KEY: subvol dir */
	if (inode->location.type == BTRFS_ROOT_ITEM_KEY)
		ino = inode->vfs_inode.i_ino;
	return ino;
}

#else

static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	return inode->vfs_inode.i_ino;
}

#endif

static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
	i_size_write(&inode->vfs_inode, size);
	inode->disk_i_size = size;
}

static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
{
	return test_bit(BTRFS_INODE_FREE_SPACE_INODE, &inode->runtime_flags);
}

static inline bool is_data_inode(struct inode *inode)
{
	return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
}

static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
						 int mod)
{
	lockdep_assert_held(&inode->lock);
	inode->outstanding_extents += mod;
	if (btrfs_is_free_space_inode(inode))
		return;
	trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
						  mod, inode->outstanding_extents);
}
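
/*
 * As the lockdep assertion above documents, callers of
 * btrfs_mod_outstanding_extents() are expected to hold the inode's spinlock
 * around the update. Illustrative sketch only, not a verbatim caller
 * ('num_extents' stands for whatever delta the caller computed):
 *
 *	spin_lock(&inode->lock);
 *	btrfs_mod_outstanding_extents(inode, num_extents);
 *	spin_unlock(&inode->lock);
 */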

/*
 * Called every time after doing a buffered, direct IO or memory mapped write.
 *
 * This is to ensure that if we write to a file that was previously fsynced in
 * the current transaction, then try to fsync it again in the same transaction,
 * we will know that there were changes in the file and that it needs to be
 * logged.
 */
static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_sub_trans = inode->root->log_transid;
	spin_unlock(&inode->lock);
}

/*
 * Should be called while holding the inode's VFS lock in exclusive mode, or
 * while holding the inode's mmap lock (struct btrfs_inode::i_mmap_lock) in
 * either shared or exclusive mode, or in a context where no one else can access
 * the inode concurrently (during inode creation or when loading an inode from
 * disk).
 */
static inline void btrfs_set_inode_full_sync(struct btrfs_inode *inode)
{
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
	/*
	 * The inode may have been part of a reflink operation in the last
	 * transaction that modified it, and then a fsync has reset the
	 * last_reflink_trans to prevent subsequent fsyncs in the same
	 * transaction from doing unnecessary work. So update last_reflink_trans
	 * to the last_trans value (we have to be pessimistic and assume a
	 * reflink happened).
	 *
	 * The ->last_trans is protected by the inode's spinlock and we can
	 * have a concurrent ordered extent completion update it. Also set
	 * last_reflink_trans to ->last_trans only if the former is less than
	 * the latter, because we can be called in a context where
	 * last_reflink_trans was set to the current transaction generation
	 * while ->last_trans was not yet updated in the current transaction,
	 * and therefore has a lower value.
	 */
	spin_lock(&inode->lock);
	if (inode->last_reflink_trans < inode->last_trans)
		inode->last_reflink_trans = inode->last_trans;
	spin_unlock(&inode->lock);
}

static inline bool btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
	bool ret = false;

	spin_lock(&inode->lock);
	if (inode->logged_trans == generation &&
	    inode->last_sub_trans <= inode->last_log_commit &&
	    inode->last_sub_trans <= btrfs_get_root_last_log_commit(inode->root))
		ret = true;
	spin_unlock(&inode->lock);
	return ret;
}

/* Check if the inode has flags compatible with compression. */
static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode)
{
	if (inode->flags & BTRFS_INODE_NODATACOW ||
	    inode->flags & BTRFS_INODE_NODATASUM)
		return false;
	return true;
}

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT				"0x%*phN"
#define CSUM_FMT_VALUE(size, bytes)		size, bytes
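
/*
 * CSUM_FMT and CSUM_FMT_VALUE() are meant to be used together in printk style
 * format strings, so a checksum of any size is printed as one hex blob.
 * Illustrative sketch only (not a message that exists in this file):
 *
 *	btrfs_warn_rl(fs_info, "csum mismatch: got " CSUM_FMT " expected " CSUM_FMT,
 *		      CSUM_FMT_VALUE(csum_size, csum),
 *		      CSUM_FMT_VALUE(csum_size, csum_expected));
 */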

int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected);
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, struct bio_vec *bv);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes, bool nowait, bool strict);

void btrfs_del_delalloc_inode(struct btrfs_inode *inode);
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct fscrypt_str *name);
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, int add_backref, u64 index);
int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry);
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front);

int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context);
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state);

struct btrfs_new_inode_args {
	/* Input */
	struct inode *dir;
	struct dentry *dentry;
	struct inode *inode;
	bool orphan;
	bool subvol;

	/* Output from btrfs_new_inode_prepare(), input to btrfs_create_new_inode(). */
	struct posix_acl *default_acl;
	struct posix_acl *acl;
	struct fscrypt_name fname;
};

int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items);
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args);
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args);
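
/*
 * Expected life cycle of btrfs_new_inode_args, as an illustrative sketch with
 * error handling and inode initialization omitted (see the create/mkdir/symlink
 * paths in inode.c for real callers; 'root' and 'trans_num_items' are the
 * caller's variables):
 *
 *	struct btrfs_new_inode_args args = { .dir = dir, .dentry = dentry, };
 *
 *	args.inode = new_inode(dir->i_sb);
 *	btrfs_new_inode_prepare(&args, &trans_num_items);
 *	trans = btrfs_start_transaction(root, trans_num_items);
 *	btrfs_create_new_inode(trans, &args);
 *	btrfs_new_inode_args_destroy(&args);
 */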

struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
				     struct inode *dir);
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
			       u32 bits);
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *state, u32 bits);
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
				 struct extent_state *other);
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *orig, u64 split);
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
void btrfs_evict_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
void btrfs_free_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
			      struct btrfs_root *root, struct btrfs_path *path);
struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, u64 start, u64 len);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct btrfs_inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
void btrfs_add_delayed_iput(struct btrfs_inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint);
int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint);
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
			     u64 start, u64 end, struct writeback_control *wbc);
int btrfs_writepage_cow_fixup(struct page *page);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
					     int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
					  u64 file_offset, u64 disk_bytenr,
					  u64 disk_io_size,
					  struct page **pages);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
			   struct btrfs_ioctl_encoded_io_args *encoded);
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			       const struct btrfs_ioctl_encoded_io_args *encoded);

ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
		       size_t done_before);
struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
				  size_t done_before);
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino);

extern const struct dentry_operations btrfs_dentry_operations;

/* Inode locking type flags, by default the exclusive lock is taken. */
enum btrfs_ilock_type {
	ENUM_BIT(BTRFS_ILOCK_SHARED),
	ENUM_BIT(BTRFS_ILOCK_TRY),
	ENUM_BIT(BTRFS_ILOCK_MMAP),
};

int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags);
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags);
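
/*
 * The flags above can be combined, e.g. BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY
 * asks for the shared lock but fails with -EAGAIN instead of blocking. The
 * lock type flags (BTRFS_ILOCK_SHARED / BTRFS_ILOCK_MMAP) passed to
 * btrfs_inode_unlock() must match the ones used for btrfs_inode_lock().
 * Illustrative sketch only, not a verbatim caller:
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret)
 *		return ret;
 *	... read-only work on the inode ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 */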

void btrfs_update_inode_bytes(struct btrfs_inode *inode, const u64 add_bytes,
			      const u64 del_bytes);
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end);

#endif