1 /* 2 * the_nilfs.c - the_nilfs shared structure. 3 * 4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 19 * 20 * Written by Ryusuke Konishi <ryusuke@osrg.net> 21 * 22 */ 23 24 #include <linux/buffer_head.h> 25 #include <linux/slab.h> 26 #include <linux/blkdev.h> 27 #include <linux/backing-dev.h> 28 #include <linux/crc32.h> 29 #include "nilfs.h" 30 #include "segment.h" 31 #include "alloc.h" 32 #include "cpfile.h" 33 #include "sufile.h" 34 #include "dat.h" 35 #include "segbuf.h" 36 37 38 static LIST_HEAD(nilfs_objects); 39 static DEFINE_SPINLOCK(nilfs_lock); 40 41 void nilfs_set_last_segment(struct the_nilfs *nilfs, 42 sector_t start_blocknr, u64 seq, __u64 cno) 43 { 44 spin_lock(&nilfs->ns_last_segment_lock); 45 nilfs->ns_last_pseg = start_blocknr; 46 nilfs->ns_last_seq = seq; 47 nilfs->ns_last_cno = cno; 48 spin_unlock(&nilfs->ns_last_segment_lock); 49 } 50 51 /** 52 * alloc_nilfs - allocate the_nilfs structure 53 * @bdev: block device to which the_nilfs is related 54 * 55 * alloc_nilfs() allocates memory for the_nilfs and 56 * initializes its reference count and locks. 57 * 58 * Return Value: On success, pointer to the_nilfs is returned. 59 * On error, NULL is returned. 60 */ 61 static struct the_nilfs *alloc_nilfs(struct block_device *bdev) 62 { 63 struct the_nilfs *nilfs; 64 65 nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL); 66 if (!nilfs) 67 return NULL; 68 69 nilfs->ns_bdev = bdev; 70 atomic_set(&nilfs->ns_count, 1); 71 atomic_set(&nilfs->ns_ndirtyblks, 0); 72 init_rwsem(&nilfs->ns_sem); 73 init_rwsem(&nilfs->ns_super_sem); 74 mutex_init(&nilfs->ns_mount_mutex); 75 init_rwsem(&nilfs->ns_writer_sem); 76 INIT_LIST_HEAD(&nilfs->ns_list); 77 INIT_LIST_HEAD(&nilfs->ns_supers); 78 spin_lock_init(&nilfs->ns_last_segment_lock); 79 nilfs->ns_gc_inodes_h = NULL; 80 init_rwsem(&nilfs->ns_segctor_sem); 81 82 return nilfs; 83 } 84 85 /** 86 * find_or_create_nilfs - find or create nilfs object 87 * @bdev: block device to which the_nilfs is related 88 * 89 * find_nilfs() looks up an existent nilfs object created on the 90 * device and gets the reference count of the object. If no nilfs object 91 * is found on the device, a new nilfs object is allocated. 92 * 93 * Return Value: On success, pointer to the nilfs object is returned. 94 * On error, NULL is returned. 95 */ 96 struct the_nilfs *find_or_create_nilfs(struct block_device *bdev) 97 { 98 struct the_nilfs *nilfs, *new = NULL; 99 100 retry: 101 spin_lock(&nilfs_lock); 102 list_for_each_entry(nilfs, &nilfs_objects, ns_list) { 103 if (nilfs->ns_bdev == bdev) { 104 get_nilfs(nilfs); 105 spin_unlock(&nilfs_lock); 106 if (new) 107 put_nilfs(new); 108 return nilfs; /* existing object */ 109 } 110 } 111 if (new) { 112 list_add_tail(&new->ns_list, &nilfs_objects); 113 spin_unlock(&nilfs_lock); 114 return new; /* new object */ 115 } 116 spin_unlock(&nilfs_lock); 117 118 new = alloc_nilfs(bdev); 119 if (new) 120 goto retry; 121 return NULL; /* insufficient memory */ 122 } 123 124 /** 125 * put_nilfs - release a reference to the_nilfs 126 * @nilfs: the_nilfs structure to be released 127 * 128 * put_nilfs() decrements a reference counter of the_nilfs. 129 * If the reference count reaches zero, the_nilfs is freed. 130 */ 131 void put_nilfs(struct the_nilfs *nilfs) 132 { 133 spin_lock(&nilfs_lock); 134 if (!atomic_dec_and_test(&nilfs->ns_count)) { 135 spin_unlock(&nilfs_lock); 136 return; 137 } 138 list_del_init(&nilfs->ns_list); 139 spin_unlock(&nilfs_lock); 140 141 /* 142 * Increment of ns_count never occurs below because the caller 143 * of get_nilfs() holds at least one reference to the_nilfs. 144 * Thus its exclusion control is not required here. 145 */ 146 147 might_sleep(); 148 if (nilfs_loaded(nilfs)) { 149 nilfs_mdt_destroy(nilfs->ns_sufile); 150 nilfs_mdt_destroy(nilfs->ns_cpfile); 151 nilfs_mdt_destroy(nilfs->ns_dat); 152 nilfs_mdt_destroy(nilfs->ns_gc_dat); 153 } 154 if (nilfs_init(nilfs)) { 155 nilfs_destroy_gccache(nilfs); 156 brelse(nilfs->ns_sbh[0]); 157 brelse(nilfs->ns_sbh[1]); 158 } 159 kfree(nilfs); 160 } 161 162 static int nilfs_load_super_root(struct the_nilfs *nilfs, 163 struct nilfs_sb_info *sbi, sector_t sr_block) 164 { 165 struct buffer_head *bh_sr; 166 struct nilfs_super_root *raw_sr; 167 struct nilfs_super_block **sbp = nilfs->ns_sbp; 168 unsigned dat_entry_size, segment_usage_size, checkpoint_size; 169 unsigned inode_size; 170 int err; 171 172 err = nilfs_read_super_root_block(sbi->s_super, sr_block, &bh_sr, 1); 173 if (unlikely(err)) 174 return err; 175 176 down_read(&nilfs->ns_sem); 177 dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size); 178 checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size); 179 segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size); 180 up_read(&nilfs->ns_sem); 181 182 inode_size = nilfs->ns_inode_size; 183 184 err = -ENOMEM; 185 nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size); 186 if (unlikely(!nilfs->ns_dat)) 187 goto failed; 188 189 nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size); 190 if (unlikely(!nilfs->ns_gc_dat)) 191 goto failed_dat; 192 193 nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size); 194 if (unlikely(!nilfs->ns_cpfile)) 195 goto failed_gc_dat; 196 197 nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size); 198 if (unlikely(!nilfs->ns_sufile)) 199 goto failed_cpfile; 200 201 nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat); 202 203 err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data + 204 NILFS_SR_DAT_OFFSET(inode_size)); 205 if (unlikely(err)) 206 goto failed_sufile; 207 208 err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data + 209 NILFS_SR_CPFILE_OFFSET(inode_size)); 210 if (unlikely(err)) 211 goto failed_sufile; 212 213 err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data + 214 NILFS_SR_SUFILE_OFFSET(inode_size)); 215 if (unlikely(err)) 216 goto failed_sufile; 217 218 raw_sr = (struct nilfs_super_root *)bh_sr->b_data; 219 nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime); 220 221 failed: 222 brelse(bh_sr); 223 return err; 224 225 failed_sufile: 226 nilfs_mdt_destroy(nilfs->ns_sufile); 227 228 failed_cpfile: 229 nilfs_mdt_destroy(nilfs->ns_cpfile); 230 231 failed_gc_dat: 232 nilfs_mdt_destroy(nilfs->ns_gc_dat); 233 234 failed_dat: 235 nilfs_mdt_destroy(nilfs->ns_dat); 236 goto failed; 237 } 238 239 static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri) 240 { 241 memset(ri, 0, sizeof(*ri)); 242 INIT_LIST_HEAD(&ri->ri_used_segments); 243 } 244 245 static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri) 246 { 247 nilfs_dispose_segment_list(&ri->ri_used_segments); 248 } 249 250 /** 251 * load_nilfs - load and recover the nilfs 252 * @nilfs: the_nilfs structure to be released 253 * @sbi: nilfs_sb_info used to recover past segment 254 * 255 * load_nilfs() searches and load the latest super root, 256 * attaches the last segment, and does recovery if needed. 257 * The caller must call this exclusively for simultaneous mounts. 258 */ 259 int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) 260 { 261 struct nilfs_recovery_info ri; 262 unsigned int s_flags = sbi->s_super->s_flags; 263 int really_read_only = bdev_read_only(nilfs->ns_bdev); 264 int valid_fs = nilfs_valid_fs(nilfs); 265 int err; 266 267 if (nilfs_loaded(nilfs)) { 268 if (valid_fs || 269 ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY))) 270 return 0; 271 printk(KERN_ERR "NILFS: the filesystem is in an incomplete " 272 "recovery state.\n"); 273 return -EINVAL; 274 } 275 276 if (!valid_fs) { 277 printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n"); 278 if (s_flags & MS_RDONLY) { 279 printk(KERN_INFO "NILFS: INFO: recovery " 280 "required for readonly filesystem.\n"); 281 printk(KERN_INFO "NILFS: write access will " 282 "be enabled during recovery.\n"); 283 } 284 } 285 286 nilfs_init_recovery_info(&ri); 287 288 err = nilfs_search_super_root(nilfs, sbi, &ri); 289 if (unlikely(err)) { 290 printk(KERN_ERR "NILFS: error searching super root.\n"); 291 goto failed; 292 } 293 294 err = nilfs_load_super_root(nilfs, sbi, ri.ri_super_root); 295 if (unlikely(err)) { 296 printk(KERN_ERR "NILFS: error loading super root.\n"); 297 goto failed; 298 } 299 300 if (valid_fs) 301 goto skip_recovery; 302 303 if (s_flags & MS_RDONLY) { 304 if (nilfs_test_opt(sbi, NORECOVERY)) { 305 printk(KERN_INFO "NILFS: norecovery option specified. " 306 "skipping roll-forward recovery\n"); 307 goto skip_recovery; 308 } 309 if (really_read_only) { 310 printk(KERN_ERR "NILFS: write access " 311 "unavailable, cannot proceed.\n"); 312 err = -EROFS; 313 goto failed_unload; 314 } 315 sbi->s_super->s_flags &= ~MS_RDONLY; 316 } else if (nilfs_test_opt(sbi, NORECOVERY)) { 317 printk(KERN_ERR "NILFS: recovery cancelled because norecovery " 318 "option was specified for a read/write mount\n"); 319 err = -EINVAL; 320 goto failed_unload; 321 } 322 323 err = nilfs_recover_logical_segments(nilfs, sbi, &ri); 324 if (err) 325 goto failed_unload; 326 327 down_write(&nilfs->ns_sem); 328 nilfs->ns_mount_state |= NILFS_VALID_FS; 329 nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state); 330 err = nilfs_commit_super(sbi, 1); 331 up_write(&nilfs->ns_sem); 332 333 if (err) { 334 printk(KERN_ERR "NILFS: failed to update super block. " 335 "recovery unfinished.\n"); 336 goto failed_unload; 337 } 338 printk(KERN_INFO "NILFS: recovery complete.\n"); 339 340 skip_recovery: 341 set_nilfs_loaded(nilfs); 342 nilfs_clear_recovery_info(&ri); 343 sbi->s_super->s_flags = s_flags; 344 return 0; 345 346 failed_unload: 347 nilfs_mdt_destroy(nilfs->ns_cpfile); 348 nilfs_mdt_destroy(nilfs->ns_sufile); 349 nilfs_mdt_destroy(nilfs->ns_dat); 350 351 failed: 352 nilfs_clear_recovery_info(&ri); 353 sbi->s_super->s_flags = s_flags; 354 return err; 355 } 356 357 static unsigned long long nilfs_max_size(unsigned int blkbits) 358 { 359 unsigned int max_bits; 360 unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */ 361 362 max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */ 363 if (max_bits < 64) 364 res = min_t(unsigned long long, res, (1ULL << max_bits) - 1); 365 return res; 366 } 367 368 static int nilfs_store_disk_layout(struct the_nilfs *nilfs, 369 struct nilfs_super_block *sbp) 370 { 371 if (le32_to_cpu(sbp->s_rev_level) != NILFS_CURRENT_REV) { 372 printk(KERN_ERR "NILFS: revision mismatch " 373 "(superblock rev.=%d.%d, current rev.=%d.%d). " 374 "Please check the version of mkfs.nilfs.\n", 375 le32_to_cpu(sbp->s_rev_level), 376 le16_to_cpu(sbp->s_minor_rev_level), 377 NILFS_CURRENT_REV, NILFS_MINOR_REV); 378 return -EINVAL; 379 } 380 nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes); 381 if (nilfs->ns_sbsize > BLOCK_SIZE) 382 return -EINVAL; 383 384 nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size); 385 nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino); 386 387 nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); 388 if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) { 389 printk(KERN_ERR "NILFS: too short segment.\n"); 390 return -EINVAL; 391 } 392 393 nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block); 394 nilfs->ns_nsegments = le64_to_cpu(sbp->s_nsegments); 395 nilfs->ns_r_segments_percentage = 396 le32_to_cpu(sbp->s_r_segments_percentage); 397 nilfs->ns_nrsvsegs = 398 max_t(unsigned long, NILFS_MIN_NRSVSEGS, 399 DIV_ROUND_UP(nilfs->ns_nsegments * 400 nilfs->ns_r_segments_percentage, 100)); 401 nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed); 402 return 0; 403 } 404 405 static int nilfs_valid_sb(struct nilfs_super_block *sbp) 406 { 407 static unsigned char sum[4]; 408 const int sumoff = offsetof(struct nilfs_super_block, s_sum); 409 size_t bytes; 410 u32 crc; 411 412 if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) 413 return 0; 414 bytes = le16_to_cpu(sbp->s_bytes); 415 if (bytes > BLOCK_SIZE) 416 return 0; 417 crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, 418 sumoff); 419 crc = crc32_le(crc, sum, 4); 420 crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4, 421 bytes - sumoff - 4); 422 return crc == le32_to_cpu(sbp->s_sum); 423 } 424 425 static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) 426 { 427 return offset < ((le64_to_cpu(sbp->s_nsegments) * 428 le32_to_cpu(sbp->s_blocks_per_segment)) << 429 (le32_to_cpu(sbp->s_log_block_size) + 10)); 430 } 431 432 static void nilfs_release_super_block(struct the_nilfs *nilfs) 433 { 434 int i; 435 436 for (i = 0; i < 2; i++) { 437 if (nilfs->ns_sbp[i]) { 438 brelse(nilfs->ns_sbh[i]); 439 nilfs->ns_sbh[i] = NULL; 440 nilfs->ns_sbp[i] = NULL; 441 } 442 } 443 } 444 445 void nilfs_fall_back_super_block(struct the_nilfs *nilfs) 446 { 447 brelse(nilfs->ns_sbh[0]); 448 nilfs->ns_sbh[0] = nilfs->ns_sbh[1]; 449 nilfs->ns_sbp[0] = nilfs->ns_sbp[1]; 450 nilfs->ns_sbh[1] = NULL; 451 nilfs->ns_sbp[1] = NULL; 452 } 453 454 void nilfs_swap_super_block(struct the_nilfs *nilfs) 455 { 456 struct buffer_head *tsbh = nilfs->ns_sbh[0]; 457 struct nilfs_super_block *tsbp = nilfs->ns_sbp[0]; 458 459 nilfs->ns_sbh[0] = nilfs->ns_sbh[1]; 460 nilfs->ns_sbp[0] = nilfs->ns_sbp[1]; 461 nilfs->ns_sbh[1] = tsbh; 462 nilfs->ns_sbp[1] = tsbp; 463 } 464 465 static int nilfs_load_super_block(struct the_nilfs *nilfs, 466 struct super_block *sb, int blocksize, 467 struct nilfs_super_block **sbpp) 468 { 469 struct nilfs_super_block **sbp = nilfs->ns_sbp; 470 struct buffer_head **sbh = nilfs->ns_sbh; 471 u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size); 472 int valid[2], swp = 0; 473 474 sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize, 475 &sbh[0]); 476 sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]); 477 478 if (!sbp[0]) { 479 if (!sbp[1]) { 480 printk(KERN_ERR "NILFS: unable to read superblock\n"); 481 return -EIO; 482 } 483 printk(KERN_WARNING 484 "NILFS warning: unable to read primary superblock\n"); 485 } else if (!sbp[1]) 486 printk(KERN_WARNING 487 "NILFS warning: unable to read secondary superblock\n"); 488 489 /* 490 * Compare two super blocks and set 1 in swp if the secondary 491 * super block is valid and newer. Otherwise, set 0 in swp. 492 */ 493 valid[0] = nilfs_valid_sb(sbp[0]); 494 valid[1] = nilfs_valid_sb(sbp[1]); 495 swp = valid[1] && (!valid[0] || 496 le64_to_cpu(sbp[1]->s_last_cno) > 497 le64_to_cpu(sbp[0]->s_last_cno)); 498 499 if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) { 500 brelse(sbh[1]); 501 sbh[1] = NULL; 502 sbp[1] = NULL; 503 swp = 0; 504 } 505 if (!valid[swp]) { 506 nilfs_release_super_block(nilfs); 507 printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n", 508 sb->s_id); 509 return -EINVAL; 510 } 511 512 if (swp) { 513 printk(KERN_WARNING "NILFS warning: broken superblock. " 514 "using spare superblock.\n"); 515 nilfs_swap_super_block(nilfs); 516 } 517 518 nilfs->ns_sbwtime[0] = le64_to_cpu(sbp[0]->s_wtime); 519 nilfs->ns_sbwtime[1] = valid[!swp] ? le64_to_cpu(sbp[1]->s_wtime) : 0; 520 nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq); 521 *sbpp = sbp[0]; 522 return 0; 523 } 524 525 /** 526 * init_nilfs - initialize a NILFS instance. 527 * @nilfs: the_nilfs structure 528 * @sbi: nilfs_sb_info 529 * @sb: super block 530 * @data: mount options 531 * 532 * init_nilfs() performs common initialization per block device (e.g. 533 * reading the super block, getting disk layout information, initializing 534 * shared fields in the_nilfs). It takes on some portion of the jobs 535 * typically done by a fill_super() routine. This division arises from 536 * the nature that multiple NILFS instances may be simultaneously 537 * mounted on a device. 538 * For multiple mounts on the same device, only the first mount 539 * invokes these tasks. 540 * 541 * Return Value: On success, 0 is returned. On error, a negative error 542 * code is returned. 543 */ 544 int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) 545 { 546 struct super_block *sb = sbi->s_super; 547 struct nilfs_super_block *sbp; 548 struct backing_dev_info *bdi; 549 int blocksize; 550 int err; 551 552 down_write(&nilfs->ns_sem); 553 if (nilfs_init(nilfs)) { 554 /* Load values from existing the_nilfs */ 555 sbp = nilfs->ns_sbp[0]; 556 err = nilfs_store_magic_and_option(sb, sbp, data); 557 if (err) 558 goto out; 559 560 blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); 561 if (sb->s_blocksize != blocksize && 562 !sb_set_blocksize(sb, blocksize)) { 563 printk(KERN_ERR "NILFS: blocksize %d unfit to device\n", 564 blocksize); 565 err = -EINVAL; 566 } 567 sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits); 568 goto out; 569 } 570 571 blocksize = sb_min_blocksize(sb, BLOCK_SIZE); 572 if (!blocksize) { 573 printk(KERN_ERR "NILFS: unable to set blocksize\n"); 574 err = -EINVAL; 575 goto out; 576 } 577 err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp); 578 if (err) 579 goto out; 580 581 err = nilfs_store_magic_and_option(sb, sbp, data); 582 if (err) 583 goto failed_sbh; 584 585 blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); 586 if (sb->s_blocksize != blocksize) { 587 int hw_blocksize = bdev_logical_block_size(sb->s_bdev); 588 589 if (blocksize < hw_blocksize) { 590 printk(KERN_ERR 591 "NILFS: blocksize %d too small for device " 592 "(sector-size = %d).\n", 593 blocksize, hw_blocksize); 594 err = -EINVAL; 595 goto failed_sbh; 596 } 597 nilfs_release_super_block(nilfs); 598 sb_set_blocksize(sb, blocksize); 599 600 err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp); 601 if (err) 602 goto out; 603 /* not failed_sbh; sbh is released automatically 604 when reloading fails. */ 605 } 606 nilfs->ns_blocksize_bits = sb->s_blocksize_bits; 607 608 err = nilfs_store_disk_layout(nilfs, sbp); 609 if (err) 610 goto failed_sbh; 611 612 sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits); 613 614 nilfs->ns_mount_state = le16_to_cpu(sbp->s_state); 615 616 bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info; 617 nilfs->ns_bdi = bdi ? : &default_backing_dev_info; 618 619 /* Finding last segment */ 620 nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg); 621 nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno); 622 nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq); 623 624 nilfs->ns_seg_seq = nilfs->ns_last_seq; 625 nilfs->ns_segnum = 626 nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); 627 nilfs->ns_cno = nilfs->ns_last_cno + 1; 628 if (nilfs->ns_segnum >= nilfs->ns_nsegments) { 629 printk(KERN_ERR "NILFS invalid last segment number.\n"); 630 err = -EINVAL; 631 goto failed_sbh; 632 } 633 /* Dummy values */ 634 nilfs->ns_free_segments_count = 635 nilfs->ns_nsegments - (nilfs->ns_segnum + 1); 636 637 /* Initialize gcinode cache */ 638 err = nilfs_init_gccache(nilfs); 639 if (err) 640 goto failed_sbh; 641 642 set_nilfs_init(nilfs); 643 err = 0; 644 out: 645 up_write(&nilfs->ns_sem); 646 return err; 647 648 failed_sbh: 649 nilfs_release_super_block(nilfs); 650 goto out; 651 } 652 653 int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, 654 size_t nsegs) 655 { 656 sector_t seg_start, seg_end; 657 sector_t start = 0, nblocks = 0; 658 unsigned int sects_per_block; 659 __u64 *sn; 660 int ret = 0; 661 662 sects_per_block = (1 << nilfs->ns_blocksize_bits) / 663 bdev_logical_block_size(nilfs->ns_bdev); 664 for (sn = segnump; sn < segnump + nsegs; sn++) { 665 nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end); 666 667 if (!nblocks) { 668 start = seg_start; 669 nblocks = seg_end - seg_start + 1; 670 } else if (start + nblocks == seg_start) { 671 nblocks += seg_end - seg_start + 1; 672 } else { 673 ret = blkdev_issue_discard(nilfs->ns_bdev, 674 start * sects_per_block, 675 nblocks * sects_per_block, 676 GFP_NOFS, 677 BLKDEV_IFL_BARRIER); 678 if (ret < 0) 679 return ret; 680 nblocks = 0; 681 } 682 } 683 if (nblocks) 684 ret = blkdev_issue_discard(nilfs->ns_bdev, 685 start * sects_per_block, 686 nblocks * sects_per_block, 687 GFP_NOFS, BLKDEV_IFL_BARRIER); 688 return ret; 689 } 690 691 int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) 692 { 693 struct inode *dat = nilfs_dat_inode(nilfs); 694 unsigned long ncleansegs; 695 696 down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 697 ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); 698 up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ 699 *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment; 700 return 0; 701 } 702 703 int nilfs_near_disk_full(struct the_nilfs *nilfs) 704 { 705 unsigned long ncleansegs, nincsegs; 706 707 ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); 708 nincsegs = atomic_read(&nilfs->ns_ndirtyblks) / 709 nilfs->ns_blocks_per_segment + 1; 710 711 return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs; 712 } 713 714 /** 715 * nilfs_find_sbinfo - find existing nilfs_sb_info structure 716 * @nilfs: nilfs object 717 * @rw_mount: mount type (non-zero value for read/write mount) 718 * @cno: checkpoint number (zero for read-only mount) 719 * 720 * nilfs_find_sbinfo() returns the nilfs_sb_info structure which 721 * @rw_mount and @cno (in case of snapshots) matched. If no instance 722 * was found, NULL is returned. Although the super block instance can 723 * be unmounted after this function returns, the nilfs_sb_info struct 724 * is kept on memory until nilfs_put_sbinfo() is called. 725 */ 726 struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *nilfs, 727 int rw_mount, __u64 cno) 728 { 729 struct nilfs_sb_info *sbi; 730 731 down_read(&nilfs->ns_super_sem); 732 /* 733 * The SNAPSHOT flag and sb->s_flags are supposed to be 734 * protected with nilfs->ns_super_sem. 735 */ 736 sbi = nilfs->ns_current; 737 if (rw_mount) { 738 if (sbi && !(sbi->s_super->s_flags & MS_RDONLY)) 739 goto found; /* read/write mount */ 740 else 741 goto out; 742 } else if (cno == 0) { 743 if (sbi && (sbi->s_super->s_flags & MS_RDONLY)) 744 goto found; /* read-only mount */ 745 else 746 goto out; 747 } 748 749 list_for_each_entry(sbi, &nilfs->ns_supers, s_list) { 750 if (nilfs_test_opt(sbi, SNAPSHOT) && 751 sbi->s_snapshot_cno == cno) 752 goto found; /* snapshot mount */ 753 } 754 out: 755 up_read(&nilfs->ns_super_sem); 756 return NULL; 757 758 found: 759 atomic_inc(&sbi->s_count); 760 up_read(&nilfs->ns_super_sem); 761 return sbi; 762 } 763 764 int nilfs_checkpoint_is_mounted(struct the_nilfs *nilfs, __u64 cno, 765 int snapshot_mount) 766 { 767 struct nilfs_sb_info *sbi; 768 int ret = 0; 769 770 down_read(&nilfs->ns_super_sem); 771 if (cno == 0 || cno > nilfs->ns_cno) 772 goto out_unlock; 773 774 list_for_each_entry(sbi, &nilfs->ns_supers, s_list) { 775 if (sbi->s_snapshot_cno == cno && 776 (!snapshot_mount || nilfs_test_opt(sbi, SNAPSHOT))) { 777 /* exclude read-only mounts */ 778 ret++; 779 break; 780 } 781 } 782 /* for protecting recent checkpoints */ 783 if (cno >= nilfs_last_cno(nilfs)) 784 ret++; 785 786 out_unlock: 787 up_read(&nilfs->ns_super_sem); 788 return ret; 789 } 790