/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

static struct workqueue_struct *_kmirrord_wq;
static struct work_struct _kmirrord_work;

static inline void wake(void)
{
	queue_work(_kmirrord_wq, &_kmirrord_work);
}

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 * clean_regions: Regions on this list have no io pending to
 * them, they are in sync, we are no longer interested in them,
 * they are dull.  rh_update_states() will remove them from the
 * hash table.
 *
 * quiesced_regions: These regions have been spun down, ready
 * for recovery.  rh_recovery_start() will remove regions from
 * this list and hand them to kmirrord, which will schedule the
 * recovery io with kcopyd.
 *
 * recovered_regions: Regions that kcopyd has successfully
 * recovered.  rh_update_states() will now schedule any delayed
 * io, up the recovery_count, and remove the region from the
 * hash.
 *
 * There are 2 locks:
 * A rw spin lock 'hash_lock' protects just the hash table,
 * this is never held in write mode from interrupt context,
 * which I believe means that we only have to disable irqs when
 * doing a write lock.
 *
 * An ordinary spin lock 'region_lock' that protects the three
 * lists in the region_hash, with the 'state', 'list' and
 * 'delayed_bios' fields of the regions.  This is used from irq
 * context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
};
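/*
 * Region states:
 *   RH_CLEAN      - in sync, no writes in flight
 *   RH_DIRTY      - in sync, writes in flight, marked in the dirty log
 *   RH_NOSYNC     - out of sync, waiting to be recovered
 *   RH_RECOVERING - currently being resynced by kcopyd
 */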
enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return bio->bi_sector >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

static void *region_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct region), gfp_mask);
}

static void region_free(void *element, void *pool_data)
{
	kfree(element);
}

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);

	rh->region_pool = mempool_create(MIN_REGIONS, region_alloc,
					 region_free, NULL);
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}
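/*
 * Allocate and insert a new region.  The caller holds the read side of
 * hash_lock; it is dropped here so the mempool allocation can sleep, and
 * the write lock is taken for the insert.  If another thread inserted the
 * same region while the lock was dropped, the freshly allocated one is
 * simply returned to the pool.
 */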
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);

	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}
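/*
 * Called from kmirrord to retire regions: clean regions are cleared in
 * the log and dropped from the hash; recovered regions have their
 * delayed writes requeued, the recovery semaphore released and the log
 * flushed before they too are freed.
 */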
static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry (reg, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			list_del(&reg->hash_list);
		}
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}
	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		rh->log->type->complete_resync_work(rh->log, reg->key, 1);
		dispatch_bios(rh->ms, &reg->delayed_bios);
		up(&rh->recovery_count);
		mempool_free(reg, rh->region_pool);
	}

	if (!list_empty(&recovered))
		rh->log->type->flush(rh->log);

	list_for_each_entry_safe (reg, next, &clean, list)
		mempool_free(reg, rh->region_pool);
}

static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}
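/*
 * Drop the pending count once a write to the region has completed.
 * Called from the target's end_io path, which may run in interrupt
 * context, hence the irqsave locking.  A region that was quiescing
 * moves to the quiesced list, otherwise it goes back on the clean
 * list, and kmirrord is woken to process it.
 */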
static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake();
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);

	else {
		list_del_init(&reg->list);
		list_add(&reg->list, &rh->quiesced_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	while (!down_trylock(&rh->recovery_count))
		if (__rh_recovery_prepare(rh) <= 0) {
			up(&rh->recovery_count);
			break;
		}
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	list_add(&reg->list, &reg->rh->recovered_regions);
	spin_unlock_irq(&rh->region_lock);

	wake();
}

static void rh_flush(struct region_hash *rh)
{
	rh->log->type->flush(rh->log);
}
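/*
 * Park a write bio on its region's delayed list; it will be resubmitted
 * by rh_update_states() once the region has been recovered.
 */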
static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake();
}

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	/* recovery */
	region_t nr_regions;
	int in_sync;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	/* FIXME: better error handling */
	rh_recovery_end(reg, read_err || write_err);
}
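/*
 * Kick off a kcopyd copy of one region from the default mirror to every
 * other mirror.  The final region may be shorter than region_size, so
 * the copy length is clipped to the end of the target.
 */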
static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->mirror + DEFAULT_MIRROR;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (i == DEFAULT_MIRROR)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->mirror + DEFAULT_MIRROR;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 0))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->mirror + DEFAULT_MIRROR;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's end_io function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, bio->bi_size, 0);
}
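/*
 * Issue the write to every mirror via dm-io.  The mirror_set is stashed
 * in the bio (see bio_set_ms) so write_callback can find it again.
 */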
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);
	dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
			 bio->bi_io_vec + bio->bi_idx,
			 write_callback, bio);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	rh_flush(&ms->rh);

	/*
	 * Dispatch io.
	 */
	while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static LIST_HEAD(_mirror_sets);
static DECLARE_RWSEM(_mirror_sets_lock);
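/*
 * Process one mirror set: take the queued reads and writes, retire
 * finished regions, start any new recovery work, then dispatch the io.
 */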
static void do_mirror(struct mirror_set *ms)
{
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

static void do_work(void *ignored)
{
	struct mirror_set *ms;

	down_read(&_mirror_sets_lock);
	list_for_each_entry (ms, &_mirror_sets, list)
		do_mirror(ms);
	up_read(&_mirror_sets_lock);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "dm-mirror: Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "dm-mirror: Error creating dirty region hash";
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	rh_exit(&ms->rh);
	kfree(ms);
}
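/*
 * A valid region size is a power of two, a multiple of the page size
 * (in sectors) and no larger than the target itself.
 */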
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	sector_t offset;

	if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) {
		ti->error = "dm-mirror: Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "dm-mirror: Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

static int add_mirror_set(struct mirror_set *ms)
{
	down_write(&_mirror_sets_lock);
	list_add_tail(&ms->list, &_mirror_sets);
	up_write(&_mirror_sets_lock);
	wake();

	return 0;
}

static void del_mirror_set(struct mirror_set *ms)
{
	down_write(&_mirror_sets_lock);
	list_del(&ms->list);
	up_write(&_mirror_sets_lock);
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "dm-mirror: Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "dm-mirror: Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "dm-mirror: Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "dm-mirror: Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "dm-mirror: Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 */
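/*
 * An illustrative table line (hypothetical devices, "core" log with a
 * single region-size parameter):
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sdb 0 /dev/sdc 0
 */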
#define DM_IO_PAGES 64
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "dm-mirror: Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc != nr_mirrors * 2) {
		ti->error = "dm-mirror: Wrong number of mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	add_mirror_set(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_mirror_set(ms);
	kcopyd_client_destroy(ms->kcopyd_client);
	free_context(ms, ti, ms->nr_mirrors);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake();
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio->bi_sector >> ms->rh.region_shift;

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return 0;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = 0;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return 0;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return 1;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);
	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT(SECTOR_FORMAT "/" SECTOR_FORMAT,
		       ms->rh.log->type->get_sync_count(ms->rh.log),
		       ms->nr_regions);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s " SECTOR_FORMAT " ",
			       ms->mirror[m].dev->name, ms->mirror[m].offset);
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	     = "mirror",
	.version     = {1, 0, 1},
	.module	     = THIS_MODULE,
	.ctr	     = mirror_ctr,
	.dtr	     = mirror_dtr,
	.map	     = mirror_map,
	.end_io	     = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	     = mirror_resume,
	.status	     = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	_kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!_kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		dm_dirty_log_exit();
		return -ENOMEM;
	}
	INIT_WORK(&_kmirrord_work, do_work, NULL);

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("%s: Failed to register mirror target",
		      mirror_target.name);
		dm_dirty_log_exit();
		destroy_workqueue(_kmirrord_wq);
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("%s: unregister failed %d", mirror_target.name, r);

	destroy_workqueue(_kmirrord_wq);
	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");