// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16	/* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64	/*
				 * Upper limit of the number of segments
				 * appended in collection retry loop
				 */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files, leads to segment writes without
			 * creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file. This also creates segments
			 * without a checkpoint.
			 */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,	/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,	/* Super root */
	NILFS_ST_DSYNC,	/* Data sync blocks */
	NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() are
 * wrapper functions of stage count (nilfs_sc_info->sc_stage.scnt). Users of
 * the variable must use them because transition of stage count must involve
 * trace events (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't required for the above purpose because it doesn't
 * produce tracepoint events. It is provided just for making the intention
 * clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_ge(a, b)	\
	(typecheck(__u32, a) && typecheck(__u32, b) &&	\
	 ((__s32)((a) - (b)) >= 0))

static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If journal_info field is occupied by other FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		nilfs_warn(sb, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}
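
/*
 * Illustrative sketch (not taken from an actual caller): a typical write
 * path wraps its metadata updates in a transaction like this, with @ti
 * placed on the caller's stack to avoid the dynamic allocation:
 *
 *	struct nilfs_transaction_info ti;
 *	int err = nilfs_transaction_begin(sb, &ti, 1);
 *
 *	if (err)
 *		return err;
 *	... dirty blocks under the segment semaphore ...
 *	return nilfs_transaction_commit(sb);
 */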

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive. The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested. To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it. It is initialized and hooked onto the current task in
 * the outermost call. If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				    trace_ti->ti_count, trace_ti->ti_flags,
				    TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin(). This is only performed
 * in outermost call of this function. If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor. If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}
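
/*
 * Unlike the reader-side helpers above, nilfs_transaction_lock() below
 * takes the segment semaphore exclusively (NILFS_TI_WRITER). It is used
 * by the log writer and cleaner paths themselves, and retries while a
 * prioritized flush request is pending.
 */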

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;

	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
	if (ssp->offset < ssp->bh->b_size)
		memset(ssp->bh->b_data + ssp->offset, 0,
		       ssp->bh->b_size - ssp->offset);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	nilfs_segctor_zeropad_segsum(sci);
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough compared with blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (ii->i_type & NILFS_I_TYPE_GC)
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}
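
/*
 * nilfs_segctor_begin_finfo()/nilfs_segctor_end_finfo() bracket the run of
 * blocks collected from one inode: begin reserves a finfo slot in the
 * summary, each block added below appends one binfo entry after it, and
 * end fills the finfo in with the final block counts.
 */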

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		nilfs_segctor_zeropad_segsum(sci);
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}
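
/*
 * The DAT uses binfo layouts different from those of ordinary files:
 * blocks of ordinary files are identified by virtual block number
 * (translated through the DAT), whereas the DAT itself is the translation
 * table, so its blocks are recorded by file offset (and b-tree level)
 * instead.
 */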

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	folio_batch_init(&fbatch);
 repeat:
	if (unlikely(index > last) ||
	    !filemap_get_folios_tag(mapping, &index, last,
				    PAGECACHE_TAG_DIRTY, &fbatch))
		return ndirties;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct buffer_head *bh, *head;
		struct folio *folio = fbatch.folios[i];

		folio_lock(folio);
		if (unlikely(folio->mapping != mapping)) {
			/* Exclude folios removed from the address space */
			folio_unlock(folio);
			continue;
		}
		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio,
						    i_blocksize(inode), 0);
		folio_unlock(folio);

		bh = head;
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				folio_batch_release(&fbatch);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;
	struct folio_batch fbatch;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	if (!btnc_inode)
		return;
	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
				(pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			bh = head = folio_buffers(fbatch.folios[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(raw_inode);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}
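
/*
 * The super root block written at the end of a logical segment holds the
 * on-disk inodes of the three root metadata files (DAT, cpfile, and
 * sufile). Recovery uses the most recent super root as its consistent
 * starting point, which is why those inodes are serialized below.
 */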

/**
 * nilfs_write_root_mdt_inode - export root metadata inode information to
 * the on-disk inode
 * @inode: inode object of the root metadata file
 * @raw_inode: on-disk inode
 *
 * nilfs_write_root_mdt_inode() writes inode information and bmap data of
 * @inode to the inode area of the metadata file allocated on the super root
 * block created to finalize the log. Since super root blocks are configured
 * each time, this function zero-fills the unused area of @raw_inode.
 */
static void nilfs_write_root_mdt_inode(struct inode *inode,
				       struct nilfs_inode *raw_inode)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	nilfs_write_inode_common(inode, raw_inode);

	/* zero-fill unused portion of raw_inode */
	raw_inode->i_xattr = 0;
	raw_inode->i_pad = 0;
	memset((void *)raw_inode + sizeof(*raw_inode), 0,
	       nilfs->ns_inode_size - sizeof(*raw_inode));

	nilfs_bmap_write(NILFS_I(inode)->i_bmap, raw_inode);
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;

	lock_buffer(bh_sr);
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_root_mdt_inode(nilfs->ns_dat, (void *)raw_sr +
				   NILFS_SR_DAT_OFFSET(isz));
	nilfs_write_root_mdt_inode(nilfs->ns_cpfile, (void *)raw_sr +
				   NILFS_SR_CPFILE_OFFSET(isz));
	nilfs_write_root_mdt_inode(nilfs->ns_sufile, (void *)raw_sr +
				   NILFS_SR_SUFILE_OFFSET(isz));

	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
	set_buffer_uptodate(bh_sr);
	unlock_buffer(bh_sr);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}
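
/*
 * nilfs_segctor_scan_file() below collects one file's blocks in three
 * passes: dirty data buffers first, then dirty b-tree node buffers, and
 * finally intermediate bmap blocks gathered by
 * nilfs_bmap_lookup_dirty_buffers(). The NILFS_CF_NODE flag records that
 * data collection has already finished, so a retried construction resumes
 * with the node passes.
 */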

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

/**
 * nilfs_free_segments - free the segments given by an array of segment numbers
 * @nilfs: nilfs object
 * @segnumv: array of segment numbers to be freed
 * @nsegs: number of segments to be freed in @segnumv
 *
 * nilfs_free_segments() wraps nilfs_sufile_freev() and
 * nilfs_sufile_cancel_freev(), and edits the segment usage metadata file
 * (sufile) to free all segments given by @segnumv and @nsegs at once. If
 * it fails midway, it cancels the changes so that none of the segments are
 * freed. If @nsegs is 0, this function does nothing.
 *
 * The freeing of segments is not finalized until the writing of a log with
 * a super root block containing this sufile change is complete, and it can
 * be canceled with nilfs_sufile_cancel_freev() until then.
 *
 * Return: 0 on success, or the following negative error code on failure.
 * * %-EINVAL - Invalid segment number.
 * * %-EIO - I/O error (including metadata corruption).
 * * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_free_segments(struct the_nilfs *nilfs, __u64 *segnumv,
			       size_t nsegs)
{
	size_t ndone;
	int ret;

	if (!nsegs)
		return 0;

	ret = nilfs_sufile_freev(nilfs->ns_sufile, segnumv, nsegs, &ndone);
	if (unlikely(ret)) {
		nilfs_sufile_cancel_freev(nilfs->ns_sufile, segnumv, ndone,
					  NULL);
		/*
		 * If a segment usage of the segments to be freed is in a
		 * hole block, nilfs_sufile_freev() will return -ENOENT.
		 * In this case, -EINVAL should be returned to the caller
		 * since there is something wrong with the given segment
		 * number array. This error can only occur during GC, so
		 * there is no need to worry about it propagating to other
		 * callers (such as fsync).
		 */
		if (ret == -ENOENT) {
			nilfs_err(nilfs->ns_sb,
				  "The segment usage entry %llu to be freed is invalid (in a hole)",
				  (unsigned long long)segnumv[ndone]);
			ret = -EINVAL;
		}
	}
	return ret;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		fallthrough;
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_cpfile_create_checkpoint(nilfs->ns_cpfile,
						     nilfs->ns_cno);
		if (unlikely(err))
			break;
		fallthrough;
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SUFILE:
		err = nilfs_free_segments(nilfs, sci->sc_freesegs,
					  sci->sc_nfreesegs);
		if (unlikely(err))
			break;
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}
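
/*
 * The collection stages above normally advance in the order
 * INIT -> GC -> FILE -> IFILE -> CPFILE -> SUFILE -> DAT -> SR -> DONE
 * (DSYNC and the flush modes short-circuit this chain). Because the
 * current stage and per-stage cursors are kept in sc_stage, a collection
 * interrupted by a filled-up segment can be resumed where it left off.
 */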

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty. The following call ensures that the buffer is dirty
	 * and will pin the buffer in memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}
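
/*
 * Every segment buffer records in sb_nextnum the segment pre-allocated
 * for the log that follows it. The cleanup helpers below roll back those
 * allocations with nilfs_sufile_free() when a construction is aborted or
 * trimmed, so that unused segments do not leak in the sufile.
 */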

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}


static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* does not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}
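
/*
 * When collection overflows the segments prepared so far, the retry loop
 * above doubles the number of appended segments on each pass (capped at
 * SC_MAX_SEGDELTA) and restores sc_stage from prev_stage, so collection
 * restarts from the saved stage rather than from scratch.
 */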

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	const struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_folio->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_folio_io(struct folio *folio)
{
	if (!folio || folio_test_writeback(folio))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice. We ignore the 2nd or later calls by this check.
		 */
		return;

	folio_lock(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
}

/**
 * nilfs_prepare_write_logs - prepare to write logs
 * @logs: logs to prepare for writing
 * @seed: checksum seed value
 *
 * nilfs_prepare_write_logs() adds checksums and prepares the block
 * buffers/folios for writing logs. In order to stabilize folios of
 * memory-mapped file blocks by putting them in writeback state before
 * calculating the checksums, first prepare to write payload blocks other
 * than segment summary and super root blocks in which the checksums will
 * be embedded.
 */
static void nilfs_prepare_write_logs(struct list_head *logs, u32 seed)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct buffer_head *bh;

	/* Prepare to write payload blocks */
	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root)
				break;
			set_buffer_async_write(bh);
			if (bh->b_folio != fs_folio) {
				nilfs_begin_folio_io(fs_folio);
				fs_folio = bh->b_folio;
			}
		}
	}
	nilfs_begin_folio_io(fs_folio);

	nilfs_add_checksums_on_logs(logs, seed);

	/* Prepare to write segment summary blocks */
	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			mark_buffer_dirty(bh);
			if (bh->b_folio == bd_folio)
				continue;
			if (bd_folio) {
				folio_lock(bd_folio);
				folio_wait_writeback(bd_folio);
				folio_clear_dirty_for_io(bd_folio);
				folio_start_writeback(bd_folio);
				folio_unlock(bd_folio);
			}
			bd_folio = bh->b_folio;
		}
	}

	/* Prepare to write super root block */
	bh = NILFS_LAST_SEGBUF(logs)->sb_super_root;
	if (bh) {
		mark_buffer_dirty(bh);
		if (bh->b_folio != bd_folio) {
			folio_lock(bd_folio);
			folio_wait_writeback(bd_folio);
			folio_clear_dirty_for_io(bd_folio);
			folio_start_writeback(bd_folio);
			folio_unlock(bd_folio);
			bd_folio = bh->b_folio;
		}
	}

	if (bd_folio) {
		folio_lock(bd_folio);
		folio_wait_writeback(bd_folio);
		folio_clear_dirty_for_io(bd_folio);
		folio_start_writeback(bd_folio);
		folio_unlock(bd_folio);
	}
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void nilfs_end_folio_io(struct folio *folio, int err)
{
	if (!folio)
		return;

	if (buffer_nilfs_node(folio_buffers(folio)) &&
	    !folio_test_writeback(folio)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (folio_test_dirty(folio)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			folio_lock(folio);
			if (nilfs_folio_buffers_clean(folio))
				__nilfs_clear_folio_dirty(folio);
			folio_unlock(folio);
		}
		return;
	}

	if (err || !nilfs_folio_buffers_clean(folio))
		filemap_dirty_folio(folio->mapping, folio);

	folio_end_writeback(folio);
}

static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			clear_buffer_uptodate(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				clear_buffer_uptodate(bh);
				if (bh->b_folio != bd_folio) {
					folio_end_writeback(bd_folio);
					bd_folio = bh->b_folio;
				}
				break;
			}
			clear_buffer_async_write(bh);
			if (bh->b_folio != fs_folio) {
				nilfs_end_folio_io(fs_folio, err);
				fs_folio = bh->b_folio;
			}
		}
	}
	if (bd_folio)
		folio_end_writeback(bd_folio);

	nilfs_end_folio_io(fs_folio, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	if (list_empty(&logs))
		return; /* if the first segment buffer preparation failed */

	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* does not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}
		/*
		 * We assume that the buffers which belong to the same folio
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of folios are
		 * identifiable by the discontinuity of bh->b_folio
		 * (folio != fs_folio).
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}
		/*
		 * We assume that buffers which belong to the same folio
		 * are contiguous in the buffer list.  Under this
		 * assumption, the last BHs of folios are identifiable by
		 * the discontinuity of bh->b_folio (folio != fs_folio).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node folios needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			const unsigned long set_bits = BIT(BH_Uptodate);
			const unsigned long clear_bits =
				(BIT(BH_Dirty) | BIT(BH_Async_Write) |
				 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
				 BIT(BH_NILFS_Redirected));

			if (bh == segbuf->sb_super_root) {
				set_buffer_uptodate(bh);
				clear_buffer_dirty(bh);
				if (bh->b_folio != bd_folio) {
					folio_end_writeback(bd_folio);
					bd_folio = bh->b_folio;
				}
				update_sr = true;
				break;
			}
			set_mask_bits(&bh->b_state, clear_bits, set_bits);
			if (bh->b_folio != fs_folio) {
				nilfs_end_folio_io(fs_folio, 0);
				fs_folio = bh->b_folio;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since folios may continue over multiple segment buffers,
	 * the end of the last folio must be checked outside of the loop.
	 */
	if (bd_folio)
		folio_end_writeback(bd_folio);

	nilfs_end_folio_io(fs_folio, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs->ns_flushed_device = 0;
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warn(sci->sc_super,
					   "log writer: error %d getting inode block (ino=%lu)",
					   err, ii->vfs_inode.i_ino);
				return err;
			}
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		/* Always redirty the buffer to avoid a race condition */
		mark_buffer_dirty(ii->i_bh);
		nilfs_mdt_mark_dirty(ifile);

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}
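/*
 * Editor's note on the retry pattern above (an illustrative sketch with
 * hypothetical names, not code from this file): the ifile block lookup
 * may sleep, so the spinlock is dropped around it and the list walk is
 * restarted from scratch once the lock is retaken.  Reduced to its
 * skeleton, the idiom is:
 *
 *	spin_lock(&lock);
 * retry:
 *	list_for_each_entry_safe(item, n, &list, link) {
 *		if (!item->resource) {
 *			spin_unlock(&lock);
 *			res = blocking_alloc();		// may sleep
 *			spin_lock(&lock);
 *			if (!item->resource)		// recheck under lock
 *				item->resource = res;
 *			else
 *				release(res);		// another task won
 *			goto retry;			// list may have changed
 *		}
 *		...
 *	}
 *	spin_unlock(&lock);
 */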
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
	int defer_iput = false;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del_init(&ii->i_dirty);
		if (!ii->vfs_inode.i_nlink || during_mount) {
			/*
			 * Defer calling iput() to avoid deadlocks if
			 * i_nlink == 0 or mount is not yet finished.
			 */
			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
			defer_iput = true;
		} else {
			spin_unlock(&nilfs->ns_inode_lock);
			iput(&ii->vfs_inode);
			spin_lock(&nilfs->ns_inode_lock);
		}
	}
	spin_unlock(&nilfs->ns_inode_lock);

	if (defer_iput)
		schedule_work(&sci->sc_iput_work);
}

/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	if (sb_rdonly(sci->sc_super))
		return -EROFS;

	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto failed;

		/* Update time stamp */
		sci->sc_seg_ctime = ktime_get_real_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
			err = nilfs_cpfile_finalize_checkpoint(
				nilfs->ns_cpfile, nilfs->ns_cno, sci->sc_root,
				sci->sc_nblk_inc + sci->sc_nblk_this_inc,
				sci->sc_seg_ctime,
				!test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags));
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_prepare_write_logs(&sci->sc_segbufs, nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
			/*
			 * At this point, we avoid double buffering for
			 * blocksize < pagesize because the page dirty flag
			 * is turned off during write and dirty buffers are
			 * not properly collected for pages crossing over
			 * segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);

 out:
	nilfs_segctor_drop_written_files(sci, nilfs);
	return err;

 failed_to_write:
 failed:
	if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
		nilfs_redirty_inodes(&sci->sc_dirty_files);
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}
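/*
 * Editor's illustration of the synchronous-wait condition above (not in
 * the original source): nilfs_segctor_wait() is called inside the loop
 * whenever ns_blocksize_bits != PAGE_SHIFT.  For example, with 4 KiB
 * pages (PAGE_SHIFT == 12) and a 1 KiB block size
 * (ns_blocksize_bits == 10), each folio carries four buffers that may
 * straddle a segment boundary, so each batch of writes is waited for
 * immediately instead of being double buffered.  Only when the block
 * size equals the page size can waiting be deferred until the loop
 * reaches NILFS_ST_DONE.
 */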
/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		if (sci->sc_task) {
			sci->sc_timer.expires = jiffies + sci->sc_interval;
			add_timer(&sci->sc_timer);
		}
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & BIT(bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= BIT(bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;

	/* assign bit 0 to data files */
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
}
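/*
 * Editor's illustration (an assumption-laden sketch, not from the
 * original source): sc_flush_request is a bitmap keyed by inode number
 * for metadata files, with bit 0 shared by all ordinary data files.
 * A caller flushing a file would do something like:
 *
 *	nilfs_flush_segment(sb, inode->i_ino);
 *
 * which sets FLUSH_FILE_BIT (bit 0) for a data inode, or BIT(ino) for a
 * metadata inode such as the DAT, and wakes the log writer through
 * sc_wait_daemon if no request was pending.
 */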
2271 */ 2272 if (!sci->sc_task) 2273 break; 2274 2275 if (atomic_read(&wait_req.done)) { 2276 err = wait_req.err; 2277 break; 2278 } 2279 if (!signal_pending(current)) { 2280 schedule(); 2281 continue; 2282 } 2283 err = -ERESTARTSYS; 2284 break; 2285 } 2286 finish_wait(&sci->sc_wait_request, &wait_req.wq); 2287 return err; 2288 } 2289 2290 static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force) 2291 { 2292 struct nilfs_segctor_wait_request *wrq, *n; 2293 unsigned long flags; 2294 2295 spin_lock_irqsave(&sci->sc_wait_request.lock, flags); 2296 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) { 2297 if (!atomic_read(&wrq->done) && 2298 (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) { 2299 wrq->err = err; 2300 atomic_set(&wrq->done, 1); 2301 } 2302 if (atomic_read(&wrq->done)) { 2303 wrq->wq.func(&wrq->wq, 2304 TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 2305 0, NULL); 2306 } 2307 } 2308 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags); 2309 } 2310 2311 /** 2312 * nilfs_construct_segment - construct a logical segment 2313 * @sb: super block 2314 * 2315 * Return Value: On success, 0 is returned. On errors, one of the following 2316 * negative error code is returned. 2317 * 2318 * %-EROFS - Read only filesystem. 2319 * 2320 * %-EIO - I/O error 2321 * 2322 * %-ENOSPC - No space left on device (only in a panic state). 2323 * 2324 * %-ERESTARTSYS - Interrupted. 2325 * 2326 * %-ENOMEM - Insufficient memory available. 2327 */ 2328 int nilfs_construct_segment(struct super_block *sb) 2329 { 2330 struct the_nilfs *nilfs = sb->s_fs_info; 2331 struct nilfs_sc_info *sci = nilfs->ns_writer; 2332 struct nilfs_transaction_info *ti; 2333 2334 if (sb_rdonly(sb) || unlikely(!sci)) 2335 return -EROFS; 2336 2337 /* A call inside transactions causes a deadlock. */ 2338 BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC); 2339 2340 return nilfs_segctor_sync(sci); 2341 } 2342 2343 /** 2344 * nilfs_construct_dsync_segment - construct a data-only logical segment 2345 * @sb: super block 2346 * @inode: inode whose data blocks should be written out 2347 * @start: start byte offset 2348 * @end: end byte offset (inclusive) 2349 * 2350 * Return Value: On success, 0 is returned. On errors, one of the following 2351 * negative error code is returned. 2352 * 2353 * %-EROFS - Read only filesystem. 2354 * 2355 * %-EIO - I/O error 2356 * 2357 * %-ENOSPC - No space left on device (only in a panic state). 2358 * 2359 * %-ERESTARTSYS - Interrupted. 2360 * 2361 * %-ENOMEM - Insufficient memory available. 
2362 */ 2363 int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, 2364 loff_t start, loff_t end) 2365 { 2366 struct the_nilfs *nilfs = sb->s_fs_info; 2367 struct nilfs_sc_info *sci = nilfs->ns_writer; 2368 struct nilfs_inode_info *ii; 2369 struct nilfs_transaction_info ti; 2370 int err = 0; 2371 2372 if (sb_rdonly(sb) || unlikely(!sci)) 2373 return -EROFS; 2374 2375 nilfs_transaction_lock(sb, &ti, 0); 2376 2377 ii = NILFS_I(inode); 2378 if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) || 2379 nilfs_test_opt(nilfs, STRICT_ORDER) || 2380 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) || 2381 nilfs_discontinued(nilfs)) { 2382 nilfs_transaction_unlock(sb); 2383 err = nilfs_segctor_sync(sci); 2384 return err; 2385 } 2386 2387 spin_lock(&nilfs->ns_inode_lock); 2388 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && 2389 !test_bit(NILFS_I_BUSY, &ii->i_state)) { 2390 spin_unlock(&nilfs->ns_inode_lock); 2391 nilfs_transaction_unlock(sb); 2392 return 0; 2393 } 2394 spin_unlock(&nilfs->ns_inode_lock); 2395 sci->sc_dsync_inode = ii; 2396 sci->sc_dsync_start = start; 2397 sci->sc_dsync_end = end; 2398 2399 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC); 2400 if (!err) 2401 nilfs->ns_flushed_device = 0; 2402 2403 nilfs_transaction_unlock(sb); 2404 return err; 2405 } 2406 2407 #define FLUSH_FILE_BIT (0x1) /* data file only */ 2408 #define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */ 2409 2410 /** 2411 * nilfs_segctor_accept - record accepted sequence count of log-write requests 2412 * @sci: segment constructor object 2413 */ 2414 static void nilfs_segctor_accept(struct nilfs_sc_info *sci) 2415 { 2416 bool thread_is_alive; 2417 2418 spin_lock(&sci->sc_state_lock); 2419 sci->sc_seq_accepted = sci->sc_seq_request; 2420 thread_is_alive = (bool)sci->sc_task; 2421 spin_unlock(&sci->sc_state_lock); 2422 2423 /* 2424 * This function does not race with the log writer thread's 2425 * termination. Therefore, deleting sc_timer, which should not be 2426 * done after the log writer thread exits, can be done safely outside 2427 * the area protected by sc_state_lock. 
/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	bool thread_is_alive;

	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	thread_is_alive = (bool)sci->sc_task;
	spin_unlock(&sci->sc_state_lock);

	/*
	 * This function does not race with the log writer thread's
	 * termination.  Therefore, deleting sc_timer, which should not be
	 * done after the log writer thread exits, can be done safely outside
	 * the area protected by sc_state_lock.
	 */
	if (thread_is_alive)
		del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err, false);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

static void nilfs_construction_timeout(struct timer_list *t)
{
	struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);

	wake_up_process(sci->sc_task);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
		iput(&ii->vfs_inode);
	}
}

int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warn(sb, "error %d cleaning segments", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);

		if (ret) {
			nilfs_warn(sb,
				   "error %d on discard request, turning discards off for the device",
				   ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}

static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * An unclosed segment should be retried.  We do this using sc_timer.
	 * A timeout of sc_timer invokes complete construction, which closes
	 * the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}

static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}
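/*
 * Editor's summary of the mode selection above (illustrative, derived
 * from the conditions in nilfs_segctor_flush_mode()):
 *
 *	pending flush requests		segment state		mode
 *	----------------------		-------------		----
 *	data files only			closed, or unclosed
 *					less than sc_mjcp_freq	SC_FLUSH_FILE
 *	DAT only			closed, or unclosed
 *					less than sc_mjcp_freq	SC_FLUSH_DAT
 *	mixed (data and DAT)		any			SC_LSEG_SR
 *	any				unclosed longer than
 *					sc_mjcp_freq		SC_LSEG_SR
 *
 * SC_LSEG_SR is the conservative fallback: it closes the logical
 * segment with a super root, i.e. it creates a checkpoint.
 */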
2639 */ 2640 static bool nilfs_log_write_required(struct nilfs_sc_info *sci, int *modep) 2641 { 2642 bool timedout, ret = true; 2643 2644 spin_lock(&sci->sc_state_lock); 2645 timedout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && 2646 time_after_eq(jiffies, sci->sc_timer.expires)); 2647 if (timedout || sci->sc_seq_request != sci->sc_seq_done) 2648 *modep = SC_LSEG_SR; 2649 else if (sci->sc_flush_request) 2650 *modep = nilfs_segctor_flush_mode(sci); 2651 else 2652 ret = false; 2653 2654 spin_unlock(&sci->sc_state_lock); 2655 return ret; 2656 } 2657 2658 /** 2659 * nilfs_segctor_thread - main loop of the log writer thread 2660 * @arg: pointer to a struct nilfs_sc_info. 2661 * 2662 * nilfs_segctor_thread() is the main loop function of the log writer kernel 2663 * thread, which determines whether log writing is necessary, and if so, 2664 * performs the log write in the background, or waits if not. It is also 2665 * used to decide the background writeback of the superblock. 2666 * 2667 * Return: Always 0. 2668 */ 2669 static int nilfs_segctor_thread(void *arg) 2670 { 2671 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg; 2672 struct the_nilfs *nilfs = sci->sc_super->s_fs_info; 2673 2674 nilfs_info(sci->sc_super, 2675 "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds", 2676 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); 2677 2678 set_freezable(); 2679 2680 while (!kthread_should_stop()) { 2681 DEFINE_WAIT(wait); 2682 bool should_write; 2683 int mode; 2684 2685 if (freezing(current)) { 2686 try_to_freeze(); 2687 continue; 2688 } 2689 2690 prepare_to_wait(&sci->sc_wait_daemon, &wait, 2691 TASK_INTERRUPTIBLE); 2692 should_write = nilfs_log_write_required(sci, &mode); 2693 if (!should_write) 2694 schedule(); 2695 finish_wait(&sci->sc_wait_daemon, &wait); 2696 2697 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs)) 2698 set_nilfs_discontinued(nilfs); 2699 2700 if (should_write) 2701 nilfs_segctor_thread_construct(sci, mode); 2702 } 2703 2704 /* end sync. 
/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed.
	 * But some tasks remain.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && ret != -EROFS && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	if (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		kthread_stop(sci->sc_task);
	}

	spin_lock(&sci->sc_state_lock);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	/*
	 * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
	 * be called from delayed iput() via nilfs_evict_inode() and can race
	 * with the above log writer thread termination.
	 */
	nilfs_segctor_wakeup(sci, 0, true);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed dirty file(s) when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed inode(s) in iput queue when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINTR	- Log writer thread creation failed due to interruption.
 * * %-ENOMEM	- Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;
	struct task_struct *t;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem is made read-only by
		 * __nilfs_error or nilfs_remount and then remounted
		 * read/write.  In these cases, reuse the existing writer.
		 */
		return 0;
	}

	sci = nilfs_segctor_new(sb, root);
	if (unlikely(!sci))
		return -ENOMEM;

	nilfs->ns_writer = sci;
	t = kthread_create(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		err = PTR_ERR(t);
		nilfs_err(sb, "error %d creating segctord thread", err);
		nilfs_detach_log_writer(sb);
		return err;
	}
	sci->sc_task = t;
	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

	wake_up_process(sci->sc_task);
	return 0;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * destroys the list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	set_nilfs_purging(nilfs);

	/* Forcibly free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_warn(sb,
			   "disposed unprocessed dirty file(s) when detaching log writer");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
	clear_nilfs_purging(nilfs);
}
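/*
 * Editor's usage sketch (hedged; the actual call sites live outside this
 * file): the mount path is expected to pair the two entry points above
 * roughly as
 *
 *	err = nilfs_attach_log_writer(sb, root);  // mount / remount rw
 *	if (err)
 *		goto failed;
 *	...
 *	nilfs_detach_log_writer(sb);              // unmount / remount ro
 *
 * nilfs_detach_log_writer() is safe to call even if attachment failed
 * part way, since it checks nilfs->ns_writer before destroying it.
 */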