// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 * @for_btnc: inode for B-tree node cache flag
 * @for_shadow: inode for shadowed page cache flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	bool for_gc;
	bool for_btnc;
	bool for_shadow;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, i_blocksize(inode) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: whether to allocate the block if it has not been allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * Not finding the block is not an error (e.g. a hole);
		 * return without setting the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

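/*
 * Usage sketch (illustrative only; not part of this file's code): generic
 * helpers such as mpage_read_folio() drive the callback above once per
 * block range of a locked folio, roughly as follows:
 *
 *	struct buffer_head bh = { .b_size = i_blocksize(inode) };
 *
 *	if (!nilfs_get_block(inode, blkoff, &bh, 0) && buffer_mapped(&bh))
 *		submit read I/O for the mapped range;
 *	else
 *		treat the range as a hole and zero it;
 *
 * On the create path, set_buffer_new() + set_buffer_delay() with
 * b_blocknr == 0 marks a delayed allocation; the real disk block number
 * is assigned later, at segment construction time.
 */
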
/**
 * nilfs_read_folio() - implement the read_folio() method of the nilfs_aops
 * address_space_operations
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, nilfs_get_block);
}

static void nilfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (sb_rdonly(inode->i_sb)) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

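/*
 * Note: for WB_SYNC_NONE writeback, nilfs_writepages() above intentionally
 * writes nothing; dirty pages of NILFS are flushed as part of log segments
 * built by the segment constructor, not by generic background writeback.
 * Only WB_SYNC_ALL (e.g. fsync()/sync(2)) forces a data-sync segment
 * covering wbc->range_start .. wbc->range_end.
 */
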
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (sb_rdonly(inode->i_sb)) {
		/*
		 * This means the filesystem was remounted read-only
		 * because of an error or metadata corruption, but dirty
		 * pages are still being flushed in the background.
		 * Simply discard this dirty page here.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static bool nilfs_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	struct inode *inode = mapping->host;
	struct buffer_head *head;
	unsigned int nr_dirty = 0;
	bool ret = filemap_dirty_folio(mapping, folio);

	/*
	 * The page may not be locked, e.g. if called from
	 * try_to_unmap_one().
	 */
	spin_lock(&mapping->private_lock);
	head = folio_buffers(folio);
	if (head) {
		struct buffer_head *bh = head;

		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);
	} else if (ret) {
		nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
	}
	spin_unlock(&mapping->private_lock);

	if (nr_dirty)
		nilfs_set_file_dirty(inode, nr_dirty);
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

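/*
 * The two helpers above form a transaction pair around the generic
 * buffered write path (sketch, simplified):
 *
 *	->write_begin()		nilfs_transaction_begin() + block_write_begin()
 *	copy user data		done by the generic write code
 *	->write_end()		generic_write_end() + nilfs_transaction_commit()
 *
 * If block_write_begin() fails, nilfs_write_begin() aborts the transaction
 * itself and nilfs_write_failed() truncates blocks instantiated beyond EOF.
 */
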
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.read_folio		= nilfs_read_folio,
	.writepages		= nilfs_writepages,
	.dirty_folio		= nilfs_dirty_folio,
	.readahead		= nilfs_readahead,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidate_folio	= block_invalidate_folio,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

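/*
 * Note on ->direct_IO: returning 0 for a WRITE request means "zero bytes
 * transferred", which makes the generic direct I/O path fall back to
 * buffered writing. O_DIRECT writes on NILFS are therefore effectively
 * buffered; only direct reads reach blockdev_direct_IO().
 */
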
static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(&init_user_ns, inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * This never occurs. When nilfs_init_acl() is supported,
		 * proper cancellation of the jobs above should be
		 * considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

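/*
 * Note: the on-disk nilfs_inode carries no atime field, which is why
 * nilfs_read_inode_common() above initializes i_atime from i_mtime;
 * access times are not persisted by this filesystem.
 */
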
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
		if (!args->for_btnc)
			return 0;
	} else if (args->for_btnc) {
		return 0;
	}
	if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
		if (!args->for_shadow)
			return 0;
	} else if (args->for_shadow) {
		return 0;
	}

	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	NILFS_I(inode)->i_cno = args->cno;
	NILFS_I(inode)->i_root = args->root;
	if (args->root && args->ino == NILFS_ROOT_INO)
		nilfs_get_root(args->root);

	if (args->for_gc)
		NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
	if (args->for_btnc)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
	if (args->for_shadow)
		NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
	return 0;
}

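/*
 * With nilfs_iget_test()/nilfs_iget_set() above, several in-core inodes
 * may share one inode number: the ordinary inode, a GC inode for some
 * checkpoint, a B-tree node cache holder, and a shadow page cache holder.
 * iget5_locked() tells them apart through the comparison arguments, e.g.
 * (illustrative):
 *
 *	struct nilfs_iget_args args = {
 *		.ino = ino, .root = root, .cno = 0,
 *		.for_gc = false, .for_btnc = true, .for_shadow = false,
 *	};
 *	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
 *
 * finds (or creates) only the B-tree node cache holder for @ino.
 */
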
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = false
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = true,
		.for_btnc = false, .for_shadow = false
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

/**
 * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
 * @inode: inode object
 *
 * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
 * or does nothing if the inode already has it. This function allocates
 * an additional inode to maintain the page cache of B-tree nodes one-on-one.
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode;
	struct nilfs_iget_args args;

	if (ii->i_assoc_inode)
		return 0;

	args.ino = inode->i_ino;
	args.root = ii->i_root;
	args.cno = ii->i_cno;
	args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
	args.for_btnc = true;
	args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;

	btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
				  nilfs_iget_set, &args);
	if (unlikely(!btnc_inode))
		return -ENOMEM;
	if (btnc_inode->i_state & I_NEW) {
		nilfs_init_btnc_inode(btnc_inode);
		unlock_new_inode(btnc_inode);
	}
	NILFS_I(btnc_inode)->i_assoc_inode = inode;
	NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
	ii->i_assoc_inode = btnc_inode;

	return 0;
}

/**
 * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
 * @inode: inode object
 *
 * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
 * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */
void nilfs_detach_btree_node_cache(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;

	if (btnc_inode) {
		NILFS_I(btnc_inode)->i_assoc_inode = NULL;
		ii->i_assoc_inode = NULL;
		iput(btnc_inode);
	}
}

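/*
 * The attach/detach pair above is strictly balanced: the owner inode holds
 * the reference on its btnc holder taken by iget5_locked() in
 * nilfs_attach_btree_node_cache(), and nilfs_detach_btree_node_cache(),
 * called from nilfs_clear_inode() at eviction time, unlinks the pair and
 * drops that reference with iput().
 */
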
/**
 * nilfs_iget_for_shadow - obtain inode for shadow mapping
 * @inode: inode object that uses shadow mapping
 *
 * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
 * caches for shadow mapping. The page cache for data pages is set up
 * in one inode and the one for b-tree node pages is set up in the
 * other inode, which is attached to the former inode.
 *
 * Return Value: On success, a pointer to the inode for data pages is
 * returned. On errors, one of the following negative error codes is
 * returned as an error pointer.
 *
 * %-ENOMEM - Insufficient memory available.
 */
struct inode *nilfs_iget_for_shadow(struct inode *inode)
{
	struct nilfs_iget_args args = {
		.ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
		.for_btnc = false, .for_shadow = true
	};
	struct inode *s_inode;
	int err;

	s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
			       nilfs_iget_set, &args);
	if (unlikely(!s_inode))
		return ERR_PTR(-ENOMEM);
	if (!(s_inode->i_state & I_NEW))
		return inode;

	NILFS_I(s_inode)->i_flags = 0;
	memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);

	err = nilfs_attach_btree_node_cache(s_inode);
	if (unlikely(err)) {
		iget_failed(s_inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(s_inode);
	return s_inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

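/*
 * All fields stored above go through cpu_to_le*(), keeping the on-disk
 * inode little-endian regardless of host byte order;
 * nilfs_read_inode_common() is the exact inverse using le*_to_cpu().
 */
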
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
	/*
	 * XXX: call with has_bmap = 0 is a workaround to avoid
	 * deadlock of bmap. This delays update of i_bmap to just
	 * before writing.
	 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
		   ret, ii->vfs_inode.i_ino);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

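/*
 * nilfs_truncate_bmap() above shrinks the bmap from its last key downward
 * in steps of at most NILFS_MAX_TRUNCATE_BLOCKS (16384 blocks, i.e. 64MB
 * with 4KB blocks), calling nilfs_relax_pressure_in_lock() between steps
 * so that a huge truncate cannot pin an unbounded number of dirty blocks
 * at once.
 */
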
static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	if (!test_bit(NILFS_I_BTNC, &ii->i_state))
		nilfs_detach_btree_node_cache(inode);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = setattr_prepare(&init_user_ns, dentry, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(&init_user_ns, inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

 out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
		     int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(&init_user_ns, inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

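/*
 * nilfs_load_inode_block() above drops ns_inode_lock across the possibly
 * blocking ifile read and rechecks ii->i_bh after retaking the lock; if
 * another task installed the buffer head in the meantime, the duplicate
 * is released with brelse() and the cached one is returned instead.
 */
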
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warn(inode->i_sb,
				   "cannot set file dirty (ino=%lu): the file is being freed",
				   inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warn(inode->i_sb,
			   "cannot mark inode dirty (ino=%lu): error %d loading inode block",
			   inode->i_ino, err);
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

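/*
 * The fiemap implementation below merges physically contiguous block runs
 * into extents and reports not-yet-committed (delayed allocation) ranges
 * found by nilfs_find_uncommitted_extent() as extents with a physical
 * address of zero and FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_MERGED set.
 */
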
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}