// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				atomic_long_read(&c->flash_dev_dirty_sectors);

	/*
	 * Unfortunately there is no control of global dirty data. If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try to ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}
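
/*
 * Worked example, with illustrative numbers rather than values from any
 * real configuration: bdev_share is a fixed-point fraction with
 * WRITEBACK_SHARE_SHIFT fractional bits. If this backing device accounts
 * for half of c->cached_dev_sectors, bdev_share comes out to
 * (1 << WRITEBACK_SHARE_SHIFT) / 2; with writeback_percent = 10 the
 * device's target is then (cache_sectors / 10) * 1/2, i.e. this one
 * backing volume targets 5% of the cache for its dirty data.
 */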
static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated. The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values. These are stored as inverses to
	 * avoid fixed point math and to make configuration easy -- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_term_inverse value of 10000 means that
	 * 1/10000th of the error is accumulated in the integral term per
	 * second. This acts as a slow, long-term average that is not
	 * subject to variations in usage like the p term.
	 */
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero. Only increase the integral term if the device
		 * is keeping up. (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}
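
/*
 * Worked example with illustrative numbers: if the device is 40000 sectors
 * over target, the default p_term_inverse of 40 yields a proportional term
 * of 1000 sectors/s -- enough to retire the excess in 40 seconds. Assuming
 * writeback_rate_update_seconds is 5 (WRITEBACK_RATE_UPDATE_SECS_DEFAULT at
 * the time of writing) and i_term_inverse is 10000, each update also folds
 * 40000 * 5 / 10000 = 20 sectors/s into the integral term, which slowly
 * takes over if the error persists.
 */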
static bool set_at_max_writeback_rate(struct cache_set *c,
				      struct cached_dev *dc)
{
	/*
	 * idle_counter is increased every time update_writeback_rate() is
	 * called. If all backing devices attached to the same cache set have
	 * identical dc->writeback_rate_update_seconds values, it takes about
	 * 6 rounds of update_writeback_rate() on each backing device before
	 * c->at_max_writeback_rate is set to 1, after which the maximum
	 * writeback rate is set for each dc->writeback_rate.rate.
	 * In order to avoid the extra locking cost of counting the exact
	 * number of dirty cached devices, c->attached_dev_nr is used to
	 * calculate the idle threshold. It might be bigger if not all cached
	 * devices are in writeback mode, but it still works well with a
	 * limited number of extra rounds of update_writeback_rate().
	 */
	if (atomic_inc_return(&c->idle_counter) <
	    atomic_read(&c->attached_dev_nr) * 6)
		return false;

	if (atomic_read(&c->at_max_writeback_rate) != 1)
		atomic_set(&c->at_max_writeback_rate, 1);

	atomic_long_set(&dc->writeback_rate.rate, INT_MAX);

	/* keep writeback_rate_target as existing value */
	dc->writeback_rate_proportional = 0;
	dc->writeback_rate_integral_scaled = 0;
	dc->writeback_rate_change = 0;

	/*
	 * Check c->idle_counter and c->at_max_writeback_rate again in case
	 * new I/O arrives before set_at_max_writeback_rate() returns.
	 * Then the writeback rate is set to 1, and its new value should be
	 * decided via __update_writeback_rate().
	 */
	if ((atomic_read(&c->idle_counter) <
	     atomic_read(&c->attached_dev_nr) * 6) ||
	    !atomic_read(&c->at_max_writeback_rate))
		return false;

	return true;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);
	struct cache_set *c = dc->disk.c;

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb();
		return;
	}

	if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
		/*
		 * If the whole cache set is idle, set_at_max_writeback_rate()
		 * will set the writeback rate to a maximum number. Then it is
		 * unnecessary to update the writeback rate for an idle cache
		 * set that is already writing back at the maximum rate.
		 */
		if (!set_at_max_writeback_rate(c, dc)) {
			down_read(&dc->writeback_lock);
			__update_writeback_rate(dc);
			up_read(&dc->writeback_lock);
		}
	}

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
	    !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();
}
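
/*
 * Note on the flag dance above: update_writeback_rate() re-arms itself with
 * schedule_delayed_work(), so a plain cancel_delayed_work_sync() can race
 * with the re-arm. BCACHE_DEV_RATE_DW_RUNNING lets the teardown path
 * (cancel_writeback_rate_update_dwork() in super.c, at the time of writing)
 * first clear BCACHE_DEV_WB_RUNNING and then wait for RATE_DW_RUNNING to go
 * away before cancelling; the smp_mb() calls pair the flag updates here
 * with the tests over there.
 */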
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}
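
/*
 * The dirty bit on the key doubles as the success flag for the whole
 * read-then-write round trip: dirty_endio() below clears it on any I/O
 * error, write_dirty() skips the backing-device write for a key that is no
 * longer dirty, and write_dirty_finish() above only inserts the cleaned key
 * (and counts it as done or failed) while the bit is still set.
 */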
static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case -- the sequence number advanced in
			 * indeterminate order relative to when we were
			 * added to the wait list.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device. Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		/* I/O request sent to backing device */
		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(io->dc->disk.c, &io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
	       next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command queueing.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(sizeof(struct dirty_io) +
				     sizeof(struct bio_vec) *
				     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence	= sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/*
			 * We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		while (!kthread_should_stop() &&
		       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
		       delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}
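
/*
 * A note on the pipeline above: read_dirty() batches contiguous dirty keys,
 * reads each extent from the cache device, and hands it off to write_dirty()
 * on the writeback workqueue. write_dirty() uses writeback_sequence_next to
 * issue the backing-device writes in the order the keys were pulled from the
 * keybuf, so writeback stays mostly sequential even though the cache reads
 * can complete in any order.
 */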
/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	if (UUID_FLASH_ONLY(&c->uuids[inode]))
		atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}
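
/*
 * Worked example with illustrative numbers: with stripe_size = 2048 sectors
 * (a power of two, which is what the mask above assumes), adding 3000 dirty
 * sectors at offset 5000 lands in stripe 2 at stripe_offset 904; the loop
 * adds 1144 sectors to stripe 2, then the remaining 1856 to stripe 3. A
 * stripe is marked in full_dirty_stripes only when its dirty count reaches
 * exactly stripe_size.
 */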
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf,
					     struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * Make sure the keybuf position is inside the range for this disk --
	 * at bringup we might not be attached yet, so this disk's inode
	 * number may not be initialized at that point.
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}
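
/*
 * Why the partial_stripes_expensive special case above: on backing devices
 * where sub-stripe writes are costly (e.g. RAID5/6 arrays, whose stripe
 * width is picked up from the queue limits), it pays to flush completely
 * dirty stripes first, since those can go out as full-stripe writes;
 * refill_full_stripes() walks the full_dirty_stripes bitmap to find them
 * before any partially dirty stripes are considered.
 */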
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	struct cache_set *c = dc->disk.c;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip the sleep here and
		 * continue to perform writeback. Otherwise, if there is no
		 * dirty data in the cache, or there is dirty data but
		 * writeback is disabled, the writeback thread should sleep
		 * here and wait for others to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop() ||
			    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If the bcache device is detaching via the sysfs
			 * interface, the writeback thread should stop once
			 * there is no dirty data left in the cache. The
			 * BCACHE_DEV_DETACHING flag is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				break;
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	cached_dev_put(dc);
	wait_for_kthread_stop();

	return 0;
}

/* Init */

#define INIT_KEYS_EACH_TIME	500000
#define INIT_KEYS_SLEEP_MS	100

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
	size_t		count;
	struct bkey	start;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	op->count++;
	if (atomic_read(&b->c->search_inflight) &&
	    !(op->count % INIT_KEYS_EACH_TIME)) {
		bkey_copy_key(&op->start, k);
		return -EAGAIN;
	}

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;
	int ret;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;
	op.count = 0;
	op.start = KEY(op.inode, 0, 0);

	do {
		ret = bch_btree_map_keys(&op.op, d->c, &op.start,
					 sectors_dirty_init_fn, 0);
		if (ret == -EAGAIN)
			schedule_timeout_interruptible(
				msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
		else if (ret < 0) {
			pr_warn("sectors dirty init failed, ret=%d!", ret);
			break;
		}
	} while (ret == -EAGAIN);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	atomic_long_set(&dc->writeback_rate.rate, 1024);
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						 WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		return PTR_ERR(dc->writeback_thread);
	}

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}