/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"


/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block when it has not been
 *	allocated yet
 *
 * This function does not issue the actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			} else if (err == -EINVAL) {
				nilfs_error(inode->i_sb, __func__,
					    "broken bmap (inode=%lu)\n",
					    inode->i_ino);
				err = -EIO;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */
	} else if (ret == -ENOENT) {
		/*
		 * Not found is not an error (e.g. a hole); we must return
		 * without the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
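
/*
 * Editorial note (not in the original source): in the get_block
 * contract, the caller presets bh_result->b_size to the number of
 * bytes it wants mapped.  On a hit, map_bh() marks the buffer mapped
 * and b_size is trimmed to the contiguous run actually found, which
 * is what lets mpage_readpages() below build multi-block BIOs.
 * set_buffer_new() flags a freshly allocated block so that generic
 * helpers such as block_write_begin() zero it instead of reading it
 * from disk.
 */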

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len, flags, pagep,
				fsdata, nilfs_get_block);
	if (unlikely(err))
		nilfs_transaction_abort(inode->i_sb);
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
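
/*
 * Editorial note (not in the original source): every buffered write is
 * bracketed by a NILFS transaction.  nilfs_write_begin() opens it and
 * nilfs_write_end() (or nilfs_transaction_abort() on failure) closes
 * it, so one write roughly unfolds as:
 *
 *	nilfs_write_begin()  ->  nilfs_transaction_begin()
 *	  copy of user data  ->  generic page-cache path
 *	nilfs_write_end()    ->  nilfs_transaction_commit()
 *
 * Committing does not itself force a segment to disk; it only closes
 * the transactional section so the segment constructor may pick up the
 * dirty blocks later.
 */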

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);
	return size;
}

struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
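
/*
 * Editorial note (not in the original source): returning 0 from
 * nilfs_direct_IO() for writes makes the generic O_DIRECT path fall
 * back to buffered writing, so direct writers still go through the
 * segment constructor.  Reads can use blockdev_direct_IO() because it
 * only needs the block mapping from nilfs_get_block(); as the comment
 * above notes, synchronization with the cleaner is still an open
 * issue here.
 */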

struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;

	err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&sbi->s_inodes_count);

	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();

	inode->i_mode = mode;
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
#ifdef CONFIG_NILFS_FS_POSIX_ACL
	ii->i_acl = NULL;
	ii->i_default_acl = NULL;
#endif
	ii->i_cno = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occurs.  When supporting nilfs_init_acl(), proper
		 * cancellation of the above jobs should be considered.
		 */
		goto failed_acl;

	mark_inode_dirty(inode);
	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	/* raw_inode will be deleted through generic_delete_inode() */
	iput(inode);
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	/* if i_nlink == 1, generic_forget_inode() will be called */
	iput(inode);

 failed:
	return ERR_PTR(err);
}
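
/*
 * Editorial note (not in the original source): the two error paths
 * above differ deliberately.  Once the ifile entry exists, failures
 * clear i_nlink and iput() the inode so the generic delete path
 * reclaims the on-disk entry; before that point, make_bad_inode()
 * ensures iput() merely forgets the half-built in-core inode.
 */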

void nilfs_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	clear_inode(inode);
	/* XXX: check error code? Is there anything I can do? */
	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
	atomic_dec(&sbi->s_inodes_count);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	if (flags & NILFS_NOATIME_FL)
#endif
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_cno = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
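
/*
 * Editorial note (not in the original source): the on-disk
 * nilfs_inode has no atime field, which is why i_atime is populated
 * from i_mtime above and why nilfs_write_inode_common() below never
 * stores it; persisted timestamps behave as if the volume were
 * mounted noatime.
 */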

static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

#ifdef CONFIG_NILFS_FS_POSIX_ACL
	NILFS_I(inode)->i_acl = NILFS_ACL_NOT_CACHED;
	NILFS_I(inode)->i_default_acl = NILFS_ACL_NOT_CACHED;
#endif
	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}

struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(new_encode_dev(inode->i_rdev));
	/*
	 * When extending the inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

	/* The buffer is guarded with lock_buffer() by the caller */
	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	/*
	 * XXX: calling with has_bmap = 0 is a workaround to avoid
	 * deadlock of bmap.  This delays update of i_bmap to just
	 * before writing.
	 */
	nilfs_write_inode_common(inode, raw_inode, 0);
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}
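
/*
 * Editorial note (not in the original source): nilfs_iget() follows
 * the standard iget_locked() pattern: a cache hit returns the inode
 * at once, while a miss leaves the inode locked and flagged I_NEW
 * until __nilfs_read_inode() has filled it in, so concurrent lookups
 * of the same ino never observe a half-initialized inode.
 */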

#define NILFS_MAX_TRUNCATE_BLOCKS	16384	/* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	if (ret == -EINVAL)
		nilfs_error(ii->vfs_inode.i_sb, __func__,
			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
	else
		nilfs_warning(ii->vfs_inode.i_sb, __func__,
			      "failed to truncate bmap (ino=%lu, err=%d)",
			      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}

void nilfs_delete_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	nilfs_truncate_bmap(ii, 0);
	nilfs_free_inode(inode);
	/* nilfs_free_inode() marks inode buffer dirty */
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;
	err = inode_setattr(inode, iattr);
	if (!err && (iattr->ia_valid & ATTR_MODE))
		err = nilfs_acl_chmod(inode);
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	/* ii->i_bh is looked up and updated only under s_inode_lock */
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
						  pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}
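
/*
 * Editorial note (not in the original source, summarizing behavior
 * implemented elsewhere): nilfs_set_file_dirty() is the single place
 * where an inode is queued on sbi->s_dirty_files; igrab() pins it
 * while queued.  The segment constructor is expected to walk this
 * list, marking entries NILFS_I_BUSY while their blocks are written,
 * and nilfs_dispose_list() drops the reference afterwards.
 */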

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(sbi, inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	lock_buffer(ibh);
	nilfs_update_inode(inode, ibh);
	unlock_buffer(ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(sbi->s_ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}