1 /* 2 * inode.c - NILFS inode operations. 3 * 4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 19 * 20 * Written by Ryusuke Konishi <ryusuke@osrg.net> 21 * 22 */ 23 24 #include <linux/buffer_head.h> 25 #include <linux/gfp.h> 26 #include <linux/mpage.h> 27 #include <linux/writeback.h> 28 #include <linux/uio.h> 29 #include "nilfs.h" 30 #include "segment.h" 31 #include "page.h" 32 #include "mdt.h" 33 #include "cpfile.h" 34 #include "ifile.h" 35 36 37 /** 38 * nilfs_get_block() - get a file block on the filesystem (callback function) 39 * @inode - inode struct of the target file 40 * @blkoff - file block number 41 * @bh_result - buffer head to be mapped on 42 * @create - indicate whether allocating the block or not when it has not 43 * been allocated yet. 44 * 45 * This function does not issue actual read request of the specified data 46 * block. It is done by VFS. 47 */ 48 int nilfs_get_block(struct inode *inode, sector_t blkoff, 49 struct buffer_head *bh_result, int create) 50 { 51 struct nilfs_inode_info *ii = NILFS_I(inode); 52 __u64 blknum = 0; 53 int err = 0, ret; 54 struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode)); 55 unsigned maxblocks = bh_result->b_size >> inode->i_blkbits; 56 57 down_read(&NILFS_MDT(dat)->mi_sem); 58 ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks); 59 up_read(&NILFS_MDT(dat)->mi_sem); 60 if (ret >= 0) { /* found */ 61 map_bh(bh_result, inode->i_sb, blknum); 62 if (ret > 0) 63 bh_result->b_size = (ret << inode->i_blkbits); 64 goto out; 65 } 66 /* data block was not found */ 67 if (ret == -ENOENT && create) { 68 struct nilfs_transaction_info ti; 69 70 bh_result->b_blocknr = 0; 71 err = nilfs_transaction_begin(inode->i_sb, &ti, 1); 72 if (unlikely(err)) 73 goto out; 74 err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff, 75 (unsigned long)bh_result); 76 if (unlikely(err != 0)) { 77 if (err == -EEXIST) { 78 /* 79 * The get_block() function could be called 80 * from multiple callers for an inode. 81 * However, the page having this block must 82 * be locked in this case. 83 */ 84 printk(KERN_WARNING 85 "nilfs_get_block: a race condition " 86 "while inserting a data block. " 87 "(inode number=%lu, file block " 88 "offset=%llu)\n", 89 inode->i_ino, 90 (unsigned long long)blkoff); 91 err = 0; 92 } else if (err == -EINVAL) { 93 nilfs_error(inode->i_sb, __func__, 94 "broken bmap (inode=%lu)\n", 95 inode->i_ino); 96 err = -EIO; 97 } 98 nilfs_transaction_abort(inode->i_sb); 99 goto out; 100 } 101 nilfs_mark_inode_dirty(inode); 102 nilfs_transaction_commit(inode->i_sb); /* never fails */ 103 /* Error handling should be detailed */ 104 set_buffer_new(bh_result); 105 map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed 106 to proper value */ 107 } else if (ret == -ENOENT) { 108 /* not found is not error (e.g. hole); must return without 109 the mapped state flag. */ 110 ; 111 } else { 112 err = ret; 113 } 114 115 out: 116 return err; 117 } 118 119 /** 120 * nilfs_readpage() - implement readpage() method of nilfs_aops {} 121 * address_space_operations. 122 * @file - file struct of the file to be read 123 * @page - the page to be read 124 */ 125 static int nilfs_readpage(struct file *file, struct page *page) 126 { 127 return mpage_readpage(page, nilfs_get_block); 128 } 129 130 /** 131 * nilfs_readpages() - implement readpages() method of nilfs_aops {} 132 * address_space_operations. 133 * @file - file struct of the file to be read 134 * @mapping - address_space struct used for reading multiple pages 135 * @pages - the pages to be read 136 * @nr_pages - number of pages to be read 137 */ 138 static int nilfs_readpages(struct file *file, struct address_space *mapping, 139 struct list_head *pages, unsigned nr_pages) 140 { 141 return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); 142 } 143 144 static int nilfs_writepages(struct address_space *mapping, 145 struct writeback_control *wbc) 146 { 147 struct inode *inode = mapping->host; 148 int err = 0; 149 150 if (wbc->sync_mode == WB_SYNC_ALL) 151 err = nilfs_construct_dsync_segment(inode->i_sb, inode, 152 wbc->range_start, 153 wbc->range_end); 154 return err; 155 } 156 157 static int nilfs_writepage(struct page *page, struct writeback_control *wbc) 158 { 159 struct inode *inode = page->mapping->host; 160 int err; 161 162 redirty_page_for_writepage(wbc, page); 163 unlock_page(page); 164 165 if (wbc->sync_mode == WB_SYNC_ALL) { 166 err = nilfs_construct_segment(inode->i_sb); 167 if (unlikely(err)) 168 return err; 169 } else if (wbc->for_reclaim) 170 nilfs_flush_segment(inode->i_sb, inode->i_ino); 171 172 return 0; 173 } 174 175 static int nilfs_set_page_dirty(struct page *page) 176 { 177 int ret = __set_page_dirty_buffers(page); 178 179 if (ret) { 180 struct inode *inode = page->mapping->host; 181 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); 182 unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); 183 184 nilfs_set_file_dirty(sbi, inode, nr_dirty); 185 } 186 return ret; 187 } 188 189 static int nilfs_write_begin(struct file *file, struct address_space *mapping, 190 loff_t pos, unsigned len, unsigned flags, 191 struct page **pagep, void **fsdata) 192 193 { 194 struct inode *inode = mapping->host; 195 int err = nilfs_transaction_begin(inode->i_sb, NULL, 1); 196 197 if (unlikely(err)) 198 return err; 199 200 *pagep = NULL; 201 err = block_write_begin(file, mapping, pos, len, flags, pagep, 202 fsdata, nilfs_get_block); 203 if (unlikely(err)) 204 nilfs_transaction_abort(inode->i_sb); 205 return err; 206 } 207 208 static int nilfs_write_end(struct file *file, struct address_space *mapping, 209 loff_t pos, unsigned len, unsigned copied, 210 struct page *page, void *fsdata) 211 { 212 struct inode *inode = mapping->host; 213 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 214 unsigned nr_dirty; 215 int err; 216 217 nr_dirty = nilfs_page_count_clean_buffers(page, start, 218 start + copied); 219 copied = generic_write_end(file, mapping, pos, len, copied, page, 220 fsdata); 221 nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty); 222 err = nilfs_transaction_commit(inode->i_sb); 223 return err ? : copied; 224 } 225 226 static ssize_t 227 nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 228 loff_t offset, unsigned long nr_segs) 229 { 230 struct file *file = iocb->ki_filp; 231 struct inode *inode = file->f_mapping->host; 232 ssize_t size; 233 234 if (rw == WRITE) 235 return 0; 236 237 /* Needs synchronization with the cleaner */ 238 size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 239 offset, nr_segs, nilfs_get_block, NULL); 240 return size; 241 } 242 243 const struct address_space_operations nilfs_aops = { 244 .writepage = nilfs_writepage, 245 .readpage = nilfs_readpage, 246 .sync_page = block_sync_page, 247 .writepages = nilfs_writepages, 248 .set_page_dirty = nilfs_set_page_dirty, 249 .readpages = nilfs_readpages, 250 .write_begin = nilfs_write_begin, 251 .write_end = nilfs_write_end, 252 /* .releasepage = nilfs_releasepage, */ 253 .invalidatepage = block_invalidatepage, 254 .direct_IO = nilfs_direct_IO, 255 .is_partially_uptodate = block_is_partially_uptodate, 256 }; 257 258 struct inode *nilfs_new_inode(struct inode *dir, int mode) 259 { 260 struct super_block *sb = dir->i_sb; 261 struct nilfs_sb_info *sbi = NILFS_SB(sb); 262 struct inode *inode; 263 struct nilfs_inode_info *ii; 264 int err = -ENOMEM; 265 ino_t ino; 266 267 inode = new_inode(sb); 268 if (unlikely(!inode)) 269 goto failed; 270 271 mapping_set_gfp_mask(inode->i_mapping, 272 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 273 274 ii = NILFS_I(inode); 275 ii->i_state = 1 << NILFS_I_NEW; 276 277 err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh); 278 if (unlikely(err)) 279 goto failed_ifile_create_inode; 280 /* reference count of i_bh inherits from nilfs_mdt_read_block() */ 281 282 atomic_inc(&sbi->s_inodes_count); 283 inode_init_owner(inode, dir, mode); 284 inode->i_ino = ino; 285 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 286 287 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { 288 err = nilfs_bmap_read(ii->i_bmap, NULL); 289 if (err < 0) 290 goto failed_bmap; 291 292 set_bit(NILFS_I_BMAP, &ii->i_state); 293 /* No lock is needed; iget() ensures it. */ 294 } 295 296 ii->i_flags = NILFS_I(dir)->i_flags; 297 if (S_ISLNK(mode)) 298 ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL); 299 if (!S_ISDIR(mode)) 300 ii->i_flags &= ~NILFS_DIRSYNC_FL; 301 302 /* ii->i_file_acl = 0; */ 303 /* ii->i_dir_acl = 0; */ 304 ii->i_dir_start_lookup = 0; 305 ii->i_cno = 0; 306 nilfs_set_inode_flags(inode); 307 spin_lock(&sbi->s_next_gen_lock); 308 inode->i_generation = sbi->s_next_generation++; 309 spin_unlock(&sbi->s_next_gen_lock); 310 insert_inode_hash(inode); 311 312 err = nilfs_init_acl(inode, dir); 313 if (unlikely(err)) 314 goto failed_acl; /* never occur. When supporting 315 nilfs_init_acl(), proper cancellation of 316 above jobs should be considered */ 317 318 return inode; 319 320 failed_acl: 321 failed_bmap: 322 inode->i_nlink = 0; 323 iput(inode); /* raw_inode will be deleted through 324 generic_delete_inode() */ 325 goto failed; 326 327 failed_ifile_create_inode: 328 make_bad_inode(inode); 329 iput(inode); /* if i_nlink == 1, generic_forget_inode() will be 330 called */ 331 failed: 332 return ERR_PTR(err); 333 } 334 335 void nilfs_free_inode(struct inode *inode) 336 { 337 struct super_block *sb = inode->i_sb; 338 struct nilfs_sb_info *sbi = NILFS_SB(sb); 339 340 clear_inode(inode); 341 /* XXX: check error code? Is there any thing I can do? */ 342 (void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino); 343 atomic_dec(&sbi->s_inodes_count); 344 } 345 346 void nilfs_set_inode_flags(struct inode *inode) 347 { 348 unsigned int flags = NILFS_I(inode)->i_flags; 349 350 inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | 351 S_DIRSYNC); 352 if (flags & NILFS_SYNC_FL) 353 inode->i_flags |= S_SYNC; 354 if (flags & NILFS_APPEND_FL) 355 inode->i_flags |= S_APPEND; 356 if (flags & NILFS_IMMUTABLE_FL) 357 inode->i_flags |= S_IMMUTABLE; 358 #ifndef NILFS_ATIME_DISABLE 359 if (flags & NILFS_NOATIME_FL) 360 #endif 361 inode->i_flags |= S_NOATIME; 362 if (flags & NILFS_DIRSYNC_FL) 363 inode->i_flags |= S_DIRSYNC; 364 mapping_set_gfp_mask(inode->i_mapping, 365 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 366 } 367 368 int nilfs_read_inode_common(struct inode *inode, 369 struct nilfs_inode *raw_inode) 370 { 371 struct nilfs_inode_info *ii = NILFS_I(inode); 372 int err; 373 374 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 375 inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid); 376 inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid); 377 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 378 inode->i_size = le64_to_cpu(raw_inode->i_size); 379 inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); 380 inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime); 381 inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime); 382 inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); 383 inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); 384 inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); 385 if (inode->i_nlink == 0 && inode->i_mode == 0) 386 return -EINVAL; /* this inode is deleted */ 387 388 inode->i_blocks = le64_to_cpu(raw_inode->i_blocks); 389 ii->i_flags = le32_to_cpu(raw_inode->i_flags); 390 #if 0 391 ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); 392 ii->i_dir_acl = S_ISREG(inode->i_mode) ? 393 0 : le32_to_cpu(raw_inode->i_dir_acl); 394 #endif 395 ii->i_dir_start_lookup = 0; 396 ii->i_cno = 0; 397 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 398 399 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 400 S_ISLNK(inode->i_mode)) { 401 err = nilfs_bmap_read(ii->i_bmap, raw_inode); 402 if (err < 0) 403 return err; 404 set_bit(NILFS_I_BMAP, &ii->i_state); 405 /* No lock is needed; iget() ensures it. */ 406 } 407 return 0; 408 } 409 410 static int __nilfs_read_inode(struct super_block *sb, unsigned long ino, 411 struct inode *inode) 412 { 413 struct nilfs_sb_info *sbi = NILFS_SB(sb); 414 struct inode *dat = nilfs_dat_inode(sbi->s_nilfs); 415 struct buffer_head *bh; 416 struct nilfs_inode *raw_inode; 417 int err; 418 419 down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 420 err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh); 421 if (unlikely(err)) 422 goto bad_inode; 423 424 raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh); 425 426 err = nilfs_read_inode_common(inode, raw_inode); 427 if (err) 428 goto failed_unmap; 429 430 if (S_ISREG(inode->i_mode)) { 431 inode->i_op = &nilfs_file_inode_operations; 432 inode->i_fop = &nilfs_file_operations; 433 inode->i_mapping->a_ops = &nilfs_aops; 434 } else if (S_ISDIR(inode->i_mode)) { 435 inode->i_op = &nilfs_dir_inode_operations; 436 inode->i_fop = &nilfs_dir_operations; 437 inode->i_mapping->a_ops = &nilfs_aops; 438 } else if (S_ISLNK(inode->i_mode)) { 439 inode->i_op = &nilfs_symlink_inode_operations; 440 inode->i_mapping->a_ops = &nilfs_aops; 441 } else { 442 inode->i_op = &nilfs_special_inode_operations; 443 init_special_inode( 444 inode, inode->i_mode, 445 huge_decode_dev(le64_to_cpu(raw_inode->i_device_code))); 446 } 447 nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh); 448 brelse(bh); 449 up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 450 nilfs_set_inode_flags(inode); 451 return 0; 452 453 failed_unmap: 454 nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh); 455 brelse(bh); 456 457 bad_inode: 458 up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 459 return err; 460 } 461 462 struct inode *nilfs_iget(struct super_block *sb, unsigned long ino) 463 { 464 struct inode *inode; 465 int err; 466 467 inode = iget_locked(sb, ino); 468 if (unlikely(!inode)) 469 return ERR_PTR(-ENOMEM); 470 if (!(inode->i_state & I_NEW)) 471 return inode; 472 473 err = __nilfs_read_inode(sb, ino, inode); 474 if (unlikely(err)) { 475 iget_failed(inode); 476 return ERR_PTR(err); 477 } 478 unlock_new_inode(inode); 479 return inode; 480 } 481 482 void nilfs_write_inode_common(struct inode *inode, 483 struct nilfs_inode *raw_inode, int has_bmap) 484 { 485 struct nilfs_inode_info *ii = NILFS_I(inode); 486 487 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 488 raw_inode->i_uid = cpu_to_le32(inode->i_uid); 489 raw_inode->i_gid = cpu_to_le32(inode->i_gid); 490 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 491 raw_inode->i_size = cpu_to_le64(inode->i_size); 492 raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); 493 raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); 494 raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); 495 raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); 496 raw_inode->i_blocks = cpu_to_le64(inode->i_blocks); 497 498 raw_inode->i_flags = cpu_to_le32(ii->i_flags); 499 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 500 501 if (has_bmap) 502 nilfs_bmap_write(ii->i_bmap, raw_inode); 503 else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) 504 raw_inode->i_device_code = 505 cpu_to_le64(huge_encode_dev(inode->i_rdev)); 506 /* When extending inode, nilfs->ns_inode_size should be checked 507 for substitutions of appended fields */ 508 } 509 510 void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh) 511 { 512 ino_t ino = inode->i_ino; 513 struct nilfs_inode_info *ii = NILFS_I(inode); 514 struct super_block *sb = inode->i_sb; 515 struct nilfs_sb_info *sbi = NILFS_SB(sb); 516 struct nilfs_inode *raw_inode; 517 518 raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh); 519 520 if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state)) 521 memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size); 522 set_bit(NILFS_I_INODE_DIRTY, &ii->i_state); 523 524 nilfs_write_inode_common(inode, raw_inode, 0); 525 /* XXX: call with has_bmap = 0 is a workaround to avoid 526 deadlock of bmap. This delays update of i_bmap to just 527 before writing */ 528 nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh); 529 } 530 531 #define NILFS_MAX_TRUNCATE_BLOCKS 16384 /* 64MB for 4KB block */ 532 533 static void nilfs_truncate_bmap(struct nilfs_inode_info *ii, 534 unsigned long from) 535 { 536 unsigned long b; 537 int ret; 538 539 if (!test_bit(NILFS_I_BMAP, &ii->i_state)) 540 return; 541 repeat: 542 ret = nilfs_bmap_last_key(ii->i_bmap, &b); 543 if (ret == -ENOENT) 544 return; 545 else if (ret < 0) 546 goto failed; 547 548 if (b < from) 549 return; 550 551 b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from); 552 ret = nilfs_bmap_truncate(ii->i_bmap, b); 553 nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb); 554 if (!ret || (ret == -ENOMEM && 555 nilfs_bmap_truncate(ii->i_bmap, b) == 0)) 556 goto repeat; 557 558 failed: 559 if (ret == -EINVAL) 560 nilfs_error(ii->vfs_inode.i_sb, __func__, 561 "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino); 562 else 563 nilfs_warning(ii->vfs_inode.i_sb, __func__, 564 "failed to truncate bmap (ino=%lu, err=%d)", 565 ii->vfs_inode.i_ino, ret); 566 } 567 568 void nilfs_truncate(struct inode *inode) 569 { 570 unsigned long blkoff; 571 unsigned int blocksize; 572 struct nilfs_transaction_info ti; 573 struct super_block *sb = inode->i_sb; 574 struct nilfs_inode_info *ii = NILFS_I(inode); 575 576 if (!test_bit(NILFS_I_BMAP, &ii->i_state)) 577 return; 578 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 579 return; 580 581 blocksize = sb->s_blocksize; 582 blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits; 583 nilfs_transaction_begin(sb, &ti, 0); /* never fails */ 584 585 block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block); 586 587 nilfs_truncate_bmap(ii, blkoff); 588 589 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 590 if (IS_SYNC(inode)) 591 nilfs_set_transaction_flag(NILFS_TI_SYNC); 592 593 nilfs_mark_inode_dirty(inode); 594 nilfs_set_file_dirty(NILFS_SB(sb), inode, 0); 595 nilfs_transaction_commit(sb); 596 /* May construct a logical segment and may fail in sync mode. 597 But truncate has no return value. */ 598 } 599 600 void nilfs_delete_inode(struct inode *inode) 601 { 602 struct nilfs_transaction_info ti; 603 struct super_block *sb = inode->i_sb; 604 struct nilfs_inode_info *ii = NILFS_I(inode); 605 606 if (unlikely(is_bad_inode(inode))) { 607 if (inode->i_data.nrpages) 608 truncate_inode_pages(&inode->i_data, 0); 609 clear_inode(inode); 610 return; 611 } 612 nilfs_transaction_begin(sb, &ti, 0); /* never fails */ 613 614 if (inode->i_data.nrpages) 615 truncate_inode_pages(&inode->i_data, 0); 616 617 nilfs_truncate_bmap(ii, 0); 618 nilfs_mark_inode_dirty(inode); 619 nilfs_free_inode(inode); 620 /* nilfs_free_inode() marks inode buffer dirty */ 621 if (IS_SYNC(inode)) 622 nilfs_set_transaction_flag(NILFS_TI_SYNC); 623 nilfs_transaction_commit(sb); 624 /* May construct a logical segment and may fail in sync mode. 625 But delete_inode has no return value. */ 626 } 627 628 int nilfs_setattr(struct dentry *dentry, struct iattr *iattr) 629 { 630 struct nilfs_transaction_info ti; 631 struct inode *inode = dentry->d_inode; 632 struct super_block *sb = inode->i_sb; 633 int err; 634 635 err = inode_change_ok(inode, iattr); 636 if (err) 637 return err; 638 639 err = nilfs_transaction_begin(sb, &ti, 0); 640 if (unlikely(err)) 641 return err; 642 err = inode_setattr(inode, iattr); 643 if (!err && (iattr->ia_valid & ATTR_MODE)) 644 err = nilfs_acl_chmod(inode); 645 if (likely(!err)) 646 err = nilfs_transaction_commit(sb); 647 else 648 nilfs_transaction_abort(sb); 649 650 return err; 651 } 652 653 int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode, 654 struct buffer_head **pbh) 655 { 656 struct nilfs_inode_info *ii = NILFS_I(inode); 657 int err; 658 659 spin_lock(&sbi->s_inode_lock); 660 if (ii->i_bh == NULL) { 661 spin_unlock(&sbi->s_inode_lock); 662 err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino, 663 pbh); 664 if (unlikely(err)) 665 return err; 666 spin_lock(&sbi->s_inode_lock); 667 if (ii->i_bh == NULL) 668 ii->i_bh = *pbh; 669 else { 670 brelse(*pbh); 671 *pbh = ii->i_bh; 672 } 673 } else 674 *pbh = ii->i_bh; 675 676 get_bh(*pbh); 677 spin_unlock(&sbi->s_inode_lock); 678 return 0; 679 } 680 681 int nilfs_inode_dirty(struct inode *inode) 682 { 683 struct nilfs_inode_info *ii = NILFS_I(inode); 684 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); 685 int ret = 0; 686 687 if (!list_empty(&ii->i_dirty)) { 688 spin_lock(&sbi->s_inode_lock); 689 ret = test_bit(NILFS_I_DIRTY, &ii->i_state) || 690 test_bit(NILFS_I_BUSY, &ii->i_state); 691 spin_unlock(&sbi->s_inode_lock); 692 } 693 return ret; 694 } 695 696 int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode, 697 unsigned nr_dirty) 698 { 699 struct nilfs_inode_info *ii = NILFS_I(inode); 700 701 atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks); 702 703 if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state)) 704 return 0; 705 706 spin_lock(&sbi->s_inode_lock); 707 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && 708 !test_bit(NILFS_I_BUSY, &ii->i_state)) { 709 /* Because this routine may race with nilfs_dispose_list(), 710 we have to check NILFS_I_QUEUED here, too. */ 711 if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) { 712 /* This will happen when somebody is freeing 713 this inode. */ 714 nilfs_warning(sbi->s_super, __func__, 715 "cannot get inode (ino=%lu)\n", 716 inode->i_ino); 717 spin_unlock(&sbi->s_inode_lock); 718 return -EINVAL; /* NILFS_I_DIRTY may remain for 719 freeing inode */ 720 } 721 list_del(&ii->i_dirty); 722 list_add_tail(&ii->i_dirty, &sbi->s_dirty_files); 723 set_bit(NILFS_I_QUEUED, &ii->i_state); 724 } 725 spin_unlock(&sbi->s_inode_lock); 726 return 0; 727 } 728 729 int nilfs_mark_inode_dirty(struct inode *inode) 730 { 731 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); 732 struct buffer_head *ibh; 733 int err; 734 735 err = nilfs_load_inode_block(sbi, inode, &ibh); 736 if (unlikely(err)) { 737 nilfs_warning(inode->i_sb, __func__, 738 "failed to reget inode block.\n"); 739 return err; 740 } 741 nilfs_update_inode(inode, ibh); 742 nilfs_mdt_mark_buffer_dirty(ibh); 743 nilfs_mdt_mark_dirty(sbi->s_ifile); 744 brelse(ibh); 745 return 0; 746 } 747 748 /** 749 * nilfs_dirty_inode - reflect changes on given inode to an inode block. 750 * @inode: inode of the file to be registered. 751 * 752 * nilfs_dirty_inode() loads a inode block containing the specified 753 * @inode and copies data from a nilfs_inode to a corresponding inode 754 * entry in the inode block. This operation is excluded from the segment 755 * construction. This function can be called both as a single operation 756 * and as a part of indivisible file operations. 757 */ 758 void nilfs_dirty_inode(struct inode *inode) 759 { 760 struct nilfs_transaction_info ti; 761 762 if (is_bad_inode(inode)) { 763 nilfs_warning(inode->i_sb, __func__, 764 "tried to mark bad_inode dirty. ignored.\n"); 765 dump_stack(); 766 return; 767 } 768 nilfs_transaction_begin(inode->i_sb, &ti, 0); 769 nilfs_mark_inode_dirty(inode); 770 nilfs_transaction_commit(inode->i_sb); /* never fails */ 771 } 772