/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/* Upper limit of the number of segments
				   appended in collection retry loop */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file.  This also creates segments
			   without a checkpoint */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
			       int);
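/*
 * The macros below compare 32-bit sequence counters modulo 2^32, in the
 * same spirit as the kernel's time_after() family.  For example (an
 * illustrative case, not taken from the original comments),
 * nilfs_cnt32_gt(1, 0xfffffffe) evaluates to true because the signed
 * difference (__s32)0xfffffffe - (__s32)1 is negative, i.e. the counter
 * is assumed to have wrapped around.
 */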
#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) && \
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)	nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)	nilfs_cnt32_ge(b, a)

static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;
		else {
			/*
			 * If the journal_info field is occupied by another
			 * FS, it is saved and will be restored on
			 * nilfs_transaction_commit().
			 */
			printk(KERN_WARNING
			       "NILFS warning: journal info from a different "
			       "FS\n");
			save = current->journal_info;
		}
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}
/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive.  The function is used in pairs with
 * nilfs_transaction_commit().  The region enclosed by these two functions
 * can be nested.  To avoid a deadlock, the semaphore is only acquired or
 * released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task
 * in the outermost call.  If a pre-allocated struct is given to @ti, it
 * is used instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the
 * amount of free space, and will wait for the GC to reclaim disk space
 * if the capacity is low.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct nilfs_sb_info *sbi;
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	sbi = NILFS_SB(sb);
	nilfs = sbi->s_nilfs;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return ret;
}
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_sb_info *sbi;
	struct nilfs_sc_info *sci;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return 0;
	}
	sbi = NILFS_SB(sb);
	sci = NILFS_SC(sbi);
	if (sci != NULL) {
		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
		    sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return;
	}
	up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
}
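/*
 * A minimal usage sketch of the begin/commit pairing described above.
 * This block is illustrative only (the function below is hypothetical,
 * not part of NILFS): callers wrap their updates in a begin/commit pair
 * and may nest such pairs freely, since only the outermost pair takes
 * the segment semaphore.
 */
#if 0
static int nilfs_example_update(struct super_block *sb)
{
	struct nilfs_transaction_info ti;	/* pre-allocated on stack */
	int err;

	err = nilfs_transaction_begin(sb, &ti, 1 /* vacancy_check */);
	if (unlikely(err))
		return err;	/* e.g. -ENOSPC when the disk is near full */

	/* ... dirty blocks under the protection of the transaction ... */

	return nilfs_transaction_commit(sb);
	/* on a failure path, call nilfs_transaction_abort(sb) instead */
}
#endif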
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	for (;;) {
		down_write(&sbi->s_nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));

		up_write(&sbi->s_nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}

static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime,
				 sci->sc_sbi->s_nilfs->ns_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}
/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* The sizes of finfo and binfo are small enough compared with
	   the block size */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);
	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(ii->i_cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}
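/*
 * Rough shape of the segment summary stream built by the helpers above
 * (a descriptive sketch reconstructed from the code, not taken from the
 * original comments):
 *
 *	+--------------------------+
 *	| nilfs_segment_summary    |
 *	+--------------------------+
 *	| finfo (file #1)          |  <- reserved by begin_finfo(),
 *	| binfo, binfo, ...        |     one binfo per collected block,
 *	| finfo (file #2)          |     header filled in by end_finfo()
 *	| binfo, ...               |
 *	+--------------------------+
 *
 * nilfs_segctor_segsum_block_required() decides when this stream must
 * spill over into an additional summary block.
 */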
static int nilfs_handle_bmap_error(int err, const char *fname,
				   struct inode *inode, struct super_block *sb)
{
	if (err == -EINVAL) {
		nilfs_error(sb, fname, "broken bmap (inode=%lu)\n",
			    inode->i_ino);
		err = -EIO;
	}
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);
	return 0;
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to page boundaries; extra dirty
		 * buffers may be included if the block size is smaller
		 * than the page size.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		if (mapping->host) {
			lock_page(page);
			if (!page_has_buffers(page))
				create_empty_buffers(page,
						     1 << inode->i_blkbits, 0);
			unlock_page(page);
		}

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}
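/*
 * Example of the rounding behaviour noted above (illustrative numbers,
 * not from the original comments): with a 1 KiB block size and 4 KiB
 * pages, syncing bytes 0-2047 rounds both @start and @end to page
 * index 0, so all four dirty buffers of that page -- including the two
 * beyond byte 2047 -- may end up on @listp.
 */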
static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&sbi->s_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &sbi->s_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&sbi->s_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static int nilfs_test_metadata_dirty(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(sbi->s_ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if (ret || nilfs_doing_gc())
		if (nilfs_mdt_fetch_dirty(nilfs_dat_inode(nilfs)))
			ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int ret = 0;

	if (nilfs_test_metadata_dirty(sbi))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&sbi->s_inode_lock);
	if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&sbi->s_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_mdt_clear_dirty(sbi->s_ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs_dat_inode(nilfs));
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code is duplicated in cpfile, but it is
		 * needed to collect the checkpoint even if it was not
		 * newly created.
		 */
		nilfs_mdt_mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}
static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic_read(&sbi->s_inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic_read(&sbi->s_blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sbi->s_ifile, &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci,
					    struct inode *ifile)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned isz = nilfs->ns_inode_size;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;

	raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs_dat_inode(nilfs), (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}
static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_entry(listp->next, struct buffer_head,
				b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;
			goto dat_stage;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sbi->s_ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs_dat_inode(nilfs),
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}
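/*
 * Summary of the stage transitions driven by the switch above (derived
 * from the code, not from an original comment): a normal SC_LSEG_SR
 * pass falls through
 *
 *	INIT -> GC -> FILE -> IFILE -> CPFILE -> SUFILE -> DAT -> SR -> DONE
 *
 * while SC_LSEG_DSYNC normally jumps from INIT straight to DSYNC,
 * SC_FLUSH_DAT jumps from INIT to DAT, and SC_FLUSH_FILE and
 * SC_FLUSH_DAT finish at DONE without reaching the SR stage.
 */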
/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}
static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage
				 is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}

static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* should not happen */
		}
		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}
static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			if (buffer_nilfs_node(bh))
				inode = NILFS_BTNC_I(bh->b_page->mapping);
			else
				inode = NILFS_AS_I(bh->b_page->mapping);

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	err = nilfs_handle_bmap_error(err, __func__, inode, sci->sc_super);
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}
static int
nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
{
	struct page *clone_page;
	struct buffer_head *bh, *head, *bh2;
	void *kaddr;

	bh = head = page_buffers(page);

	clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
	if (unlikely(!clone_page))
		return -ENOMEM;

	bh2 = page_buffers(clone_page);
	kaddr = kmap_atomic(page, KM_USER0);
	do {
		if (list_empty(&bh->b_assoc_buffers))
			continue;
		get_bh(bh2);
		page_cache_get(clone_page); /* for each bh */
		memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
		bh2->b_blocknr = bh->b_blocknr;
		list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
		list_add_tail(&bh->b_assoc_buffers, out);
	} while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
	kunmap_atomic(kaddr, KM_USER0);

	if (!TestSetPageWriteback(clone_page))
		inc_zone_page_state(clone_page, NR_WRITEBACK);
	unlock_page(clone_page);

	return 0;
}

static int nilfs_test_page_to_be_frozen(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
		return 0;

	if (page_mapped(page)) {
		ClearPageChecked(page);
		return 1;
	}
	return PageChecked(page);
}

static int nilfs_begin_page_io(struct page *page, struct list_head *out)
{
	if (!page || PageWriteback(page))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice.  The second and later calls are skipped by this
		 * check.
		 */
		return 0;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);

	if (nilfs_test_page_to_be_frozen(page)) {
		int err = nilfs_copy_replace_page_buffers(page, out);
		if (unlikely(err))
			return err;
	}
	return 0;
}

static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
				       struct page **failed_page)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct list_head *list = &sci->sc_copied_buffers;
	int err;

	*failed_page = NULL;
	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				err = nilfs_begin_page_io(fs_page, list);
				if (unlikely(err)) {
					*failed_page = fs_page;
					goto out;
				}
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	err = nilfs_begin_page_io(fs_page, list);
	if (unlikely(err))
		*failed_page = fs_page;
 out:
	return err;
}
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void __nilfs_end_page_io(struct page *page, int err)
{
	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	if (buffer_nilfs_allocated(page_buffers(page))) {
		if (TestClearPageWriteback(page))
			dec_zone_page_state(page, NR_WRITEBACK);
	} else
		end_page_writeback(page);
}

static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	__nilfs_end_page_io(page, err);
}

static void nilfs_clear_copied_buffers(struct list_head *list, int err)
{
	struct buffer_head *bh, *head;
	struct page *page;

	while (!list_empty(list)) {
		bh = list_entry(list->next, struct buffer_head,
				b_assoc_buffers);
		page = bh->b_page;
		page_cache_get(page);
		head = bh = page_buffers(page);
		do {
			if (!list_empty(&bh->b_assoc_buffers)) {
				list_del_init(&bh->b_assoc_buffers);
				if (!err) {
					set_buffer_uptodate(bh);
					clear_buffer_dirty(bh);
					clear_buffer_nilfs_volatile(bh);
				}
				brelse(bh); /* for b_assoc_buffers */
			}
		} while ((bh = bh->b_this_page) != head);

		__nilfs_end_page_io(page, err);
		page_cache_release(page);
	}
}
static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
			     int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				if (fs_page && fs_page == failed_page)
					return;
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, NULL, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);
	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* should not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BH of each page is
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_nilfs_volatile(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				update_sr = true;
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!NILFS_SEG_SIMPLEX(&segbuf->sb_sum)) {
			if (NILFS_SEG_LOGBGN(&segbuf->sb_sum)) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (NILFS_SEG_LOGEND(&segbuf->sb_sum))
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * the end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc()) {
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
		if (update_sr)
			nilfs_commit_gcdat_inode(nilfs);
	} else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
		set_nilfs_sb_dirty(nilfs);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
					struct nilfs_sb_info *sbi)
{
	struct nilfs_inode_info *ii, *n;
	__u64 cno = sbi->s_nilfs->ns_cno;

	spin_lock(&sbi->s_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&sbi->s_inode_lock);
			err = nilfs_ifile_get_inode_block(
				sbi->s_ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warning(sbi->s_super, __func__,
					      "failed to get inode block.\n");
				return err;
			}
			nilfs_mdt_mark_buffer_dirty(ibh);
			nilfs_mdt_mark_dirty(sbi->s_ifile);
			spin_lock(&sbi->s_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}
		ii->i_cno = cno;

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&sbi->s_inode_lock);

	NILFS_I(sbi->s_ifile)->i_cno = cno;

	return 0;
}
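
/*
 * The check-in loop above uses the classic drop-lock-and-revalidate
 * idiom: s_inode_lock cannot be held across the blocking
 * nilfs_ifile_get_inode_block() call, so the lock is released for the
 * I/O and the whole list walk is restarted, because other tasks may have
 * changed the list in the meantime.  Schematically:
 *
 *	spin_lock(&lock);
 * retry:
 *	list_for_each_entry_safe(pos, n, &list, member) {
 *		if (needs_blocking_work(pos)) {
 *			spin_unlock(&lock);
 *			do_blocking_work(pos);
 *			spin_lock(&lock);
 *			goto retry;	(list may have changed)
 *		}
 *	}
 *	spin_unlock(&lock);
 *
 * (needs_blocking_work()/do_blocking_work() are placeholders for this
 * sketch, not functions of this file.)
 */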

static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
					  struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_inode_info *ii, *n;
	__u64 cno = sbi->s_nilfs->ns_cno;

	spin_lock(&sbi->s_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state)) {
			/*
			 * The current checkpoint number (=nilfs->ns_cno)
			 * changes between check-in and check-out only if
			 * the super root is written out.  So, we can
			 * update i_cno for the inodes that remain in the
			 * dirty list.
			 */
			ii->i_cno = cno;
			continue;
		}
		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &ti->ti_garbage);
	}
	spin_unlock(&sbi->s_inode_lock);
}

/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct page *failed_page;
	int err;

	sci->sc_stage.scnt = NILFS_ST_INIT;

	err = nilfs_segctor_check_in_files(sci, sbi);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(sbi))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = get_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
		    NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile);

		if (mode == SC_LSEG_SR &&
		    sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		err = nilfs_segctor_prepare_write(sci, &failed_page);
		if (err) {
			nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
			goto failed_to_write;
		}

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (sci->sc_stage.scnt == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because the page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (sci->sc_stage.scnt != NILFS_ST_DONE);

 out:
	nilfs_segctor_check_out_files(sci, sbi);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}
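
/*
 * Usage sketch: nilfs_segctor_do_construct() is not entered directly by
 * other threads; every caller already holds the segment semaphore,
 * typically via the transaction-lock helpers, roughly:
 *
 *	struct nilfs_transaction_info ti;
 *
 *	nilfs_transaction_lock(sbi, &ti, 0);
 *	nilfs_segctor_construct(sci, SC_LSEG_SR);
 *	nilfs_transaction_unlock(sbi);
 *
 * See nilfs_segctor_thread_construct() below for a real caller.
 */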

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & (1 << bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= (1 << bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}

struct nilfs_segctor_wait_request {
	wait_queue_t wq;
	__u32 seq;
	int err;
	atomic_t done;
};

static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
				 wq.task_list) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}
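
/*
 * Request/completion handshake in brief: nilfs_segctor_sync() above
 * takes a ticket with ++sc_seq_request and sleeps; once the daemon has
 * finished a construction covering that ticket it publishes sc_seq_done,
 * and nilfs_segctor_wakeup() completes every waiter whose ticket is
 * already covered.  The comparison deliberately uses the wrap-safe
 * 32-bit helpers defined near the top of this file:
 *
 *	if (nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))
 *		... the request tagged wrq->seq has been serviced,
 *		    even if the 32-bit counter has wrapped ...
 */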

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
	    nilfs_test_opt(sbi, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(sbi->s_nilfs)) {
		nilfs_transaction_unlock(sbi);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&sbi->s_inode_lock);
		nilfs_transaction_unlock(sbi);
		return 0;
	}
	spin_unlock(&sbi->s_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);

	nilfs_transaction_unlock(sbi);
	return err;
}

#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	(1 << NILFS_DAT_INO) /* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}
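
/*
 * nilfs_segctor_accept() is always paired with nilfs_segctor_notify()
 * around one construction pass (see nilfs_segctor_construct() below):
 *
 *	nilfs_segctor_accept(sci);	(snapshot sc_seq_request, stop timer)
 *	err = nilfs_segctor_do_construct(sci, mode);
 *	nilfs_segctor_notify(sci, mode, err);	(publish sc_seq_done)
 */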

/**
 * nilfs_segctor_notify - notify caller threads of the result of a request
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = nilfs_commit_super(
				sbi, nilfs_altsb_need_update(nilfs));
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

static void nilfs_construction_timeout(unsigned long data)
{
	struct task_struct *p = (struct task_struct *)data;
	wake_up_process(p);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		hlist_del_init(&ii->vfs_inode.i_hash);
		list_del_init(&ii->i_dirty);
		nilfs_clear_gcinode(&ii->vfs_inode);
	}
}
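
/*
 * Background for nilfs_clean_segments() below (hedged sketch): it is
 * reached from the NILFS_IOCTL_CLEAN_SEGMENTS handler on behalf of the
 * userland cleaner.  The ioctl code copies in the nilfs_argv vectors and
 * calls, in essence:
 *
 *	err = nilfs_clean_segments(sb, argv, kbufs);
 *
 * where kbufs[4]/argv[4] carry the segment numbers to be freed (they
 * become sc_freesegs/sc_nfreesegs).
 */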

int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 1);

	err = nilfs_init_gcdat_inode(nilfs);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err))
		goto out_unlock;

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warning(sb, __func__,
			      "segment construction failed. (err=%d)", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(sbi, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			printk(KERN_WARNING
			       "NILFS warning: error %d on discard request, "
			       "turning discards off for the device\n", ret);
			nilfs_clear_opt(sbi, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_clear_gcdat_inode(nilfs);
	nilfs_transaction_unlock(sbi);
	return err;
}

static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sbi, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * An unclosed segment should be retried.  We do this using the
	 * sc_timer: its timeout invokes a complete construction, which
	 * closes the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sbi);
}

static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;
	int err;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		err = nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}
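
/*
 * Flush-request bit layout used by nilfs_segctor_flush_mode() above:
 * bit 0 stands for ordinary data files and bit NILFS_DAT_INO for the DAT
 * metadata file.  Provided no unclosed logical segment has outlived the
 * checkpoint frequency, the mapping is:
 *
 *	only FLUSH_FILE_BIT set	->	SC_FLUSH_FILE
 *	only FLUSH_DAT_BIT set	->	SC_FLUSH_DAT
 *	any other combination	->	SC_LSEG_SR (full construction)
 */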

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	int timeout = 0;

	sci->sc_timer.data = (unsigned long)current;
	sci->sc_timer.function = nilfs_construction_timeout;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	printk(KERN_INFO
	       "segctord starting. Construction interval = %lu seconds, "
	       "CP frequency < %lu seconds\n",
	       sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (!sci->sc_flush_request)
			break;
		else
			mode = nilfs_segctor_flush_mode(sci);

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		refrigerator();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
						   sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}

static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
		       err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
{
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_sbi = sbi;
	sci->sc_super = sbi->s_super;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_copied_buffers);
	init_timer(&sci->sc_timer);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (sbi->s_interval)
		sci->sc_interval = sbi->s_interval;
	if (sbi->s_watermark)
		sci->sc_watermark = sbi->s_watermark;
	return sci;
}
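
/*
 * Thread lifecycle sketch: sc_task doubles as the liveness flag of the
 * daemon started from nilfs_segctor_start_thread() above.
 *
 *	creator				segctord
 *	-------				--------
 *	kthread_run()
 *	wait_event(sc_wait_task,	sc_task = current;
 *		   sc_task != NULL)	wake_up(&sc_wait_task);
 *	...
 *	set NILFS_SEGCTOR_QUIT		sees QUIT, sc_task = NULL;
 *	wait_event(sc_wait_task,	wake_up(&sc_wait_task);
 *		   sc_task == NULL)
 */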

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed.
	 * But some tasks remain.
	 */
	do {
		struct nilfs_sb_info *sbi = sci->sc_sbi;
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sbi, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sbi);

	} while (ret && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int flag;

	up_write(&sbi->s_nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	WARN_ON(!list_empty(&sci->sc_copied_buffers));

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warning(sbi->s_super, __func__,
			      "dirty file(s) after the final construction\n");
		nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	down_write(&sbi->s_nilfs->ns_segctor_sem);

	del_timer_sync(&sci->sc_timer);
	kfree(sci);
}

/**
 * nilfs_attach_segment_constructor - attach a segment constructor
 * @sbi: nilfs_sb_info
 *
 * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
 * initializes it, and starts the segment constructor.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int err;

	if (NILFS_SC(sbi)) {
		/*
		 * This happens if the filesystem was remounted
		 * read/write after nilfs_error degenerated it into a
		 * read-only mount.
		 */
		nilfs_detach_segment_constructor(sbi);
	}

	sbi->s_sc_info = nilfs_segctor_new(sbi);
	if (!sbi->s_sc_info)
		return -ENOMEM;

	nilfs_attach_writer(nilfs, sbi);
	err = nilfs_segctor_start_thread(NILFS_SC(sbi));
	if (err) {
		nilfs_detach_writer(nilfs, sbi);
		kfree(sbi->s_sc_info);
		sbi->s_sc_info = NULL;
	}
	return err;
}
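
/*
 * Usage sketch (hypothetical mount path, cf. the super-block code):
 * attachment and detachment pair up across mount and unmount:
 *
 *	err = nilfs_attach_segment_constructor(sbi);
 *	if (err)
 *		goto failed_to_mount;
 *	...
 *	nilfs_detach_segment_constructor(sbi);
 */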

/**
 * nilfs_detach_segment_constructor - destroy the segment constructor
 * @sbi: nilfs_sb_info
 *
 * nilfs_detach_segment_constructor() kills the segment constructor daemon,
 * frees the struct nilfs_sc_info, and destroys the dirty file list.
 */
void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (NILFS_SC(sbi)) {
		nilfs_segctor_destroy(NILFS_SC(sbi));
		sbi->s_sc_info = NULL;
	}

	/* Force to free the list of dirty files */
	spin_lock(&sbi->s_inode_lock);
	if (!list_empty(&sbi->s_dirty_files)) {
		list_splice_init(&sbi->s_dirty_files, &garbage_list);
		nilfs_warning(sbi->s_super, __func__,
			      "non-empty dirty list after the last "
			      "segment construction\n");
	}
	spin_unlock(&sbi->s_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(sbi, &garbage_list, 1);
	nilfs_detach_writer(nilfs, sbi);
}