Lines matching full:ms in drivers/md/dm-raid1.c (the dm-raid1 mirror target)
51 struct mirror_set *ms;
99 struct mirror_set *ms = context;
101 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
106 struct mirror_set *ms = timer_container_of(ms, t, timer);
108 clear_bit(0, &ms->timer_pending);
109 wakeup_mirrord(ms);
112 static void delayed_wake(struct mirror_set *ms)
114 if (test_and_set_bit(0, &ms->timer_pending))
117 ms->timer.expires = jiffies + HZ / 5;
118 add_timer(&ms->timer);
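
The matches at 106-118 show kmirrord's coalescing wakeup: delayed_wake() arms a one-shot timer only if the timer_pending bit was clear, and delayed_wake_fn() (registered at line 1139 below) clears the bit before waking the worker, so any number of callers share a single pending timer. A minimal userspace sketch of that pattern, assuming C11 <threads.h> and <stdatomic.h>; wakeup_worker() and timer_thread() are illustrative names, not the kernel API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>
    #include <time.h>

    static atomic_bool timer_pending;        /* ms->timer_pending, bit 0 */

    static void wakeup_worker(void)          /* wakeup_mirrord() analogue */
    {
        puts("worker woken");
    }

    static int timer_thread(void *arg)       /* delayed_wake_fn() analogue */
    {
        (void)arg;
        thrd_sleep(&(struct timespec){ .tv_nsec = 200 * 1000 * 1000 }, NULL);
        atomic_store(&timer_pending, false); /* clear_bit(0, ...) */
        wakeup_worker();
        return 0;
    }

    static void delayed_wake(void)
    {
        /* Timer already armed? Coalesce into the pending wakeup. */
        if (atomic_exchange(&timer_pending, true))
            return;

        thrd_t t;
        thrd_create(&t, timer_thread, NULL); /* add_timer(); 200 ms = HZ/5 */
        thrd_detach(t);
    }

    int main(void)
    {
        delayed_wake();
        delayed_wake();  /* no-op: a wakeup is already pending */
        thrd_sleep(&(struct timespec){ .tv_sec = 1 }, NULL);
        return 0;
    }
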
126 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
132 bl = (rw == WRITE) ? &ms->writes : &ms->reads;
133 spin_lock_irqsave(&ms->lock, flags);
137 wakeup_mirrord(ms);
138 spin_unlock_irqrestore(&ms->lock, flags);
143 struct mirror_set *ms = context;
147 queue_bio(ms, bio, WRITE);
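
Lines 126-147 are the producer side: queue_bio() picks the per-direction list, appends under ms->lock, and wakes kmirrord (the full source wakes only when the list goes non-empty); dispatch_bios, the callback at line 143, funnels recovered writes back through the same path. A compilable userspace sketch of one-lock-two-lists queueing with a wakeup on the empty-to-nonempty transition, using pthreads; types and names are illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node { struct node *next; };
    struct bio_list { struct node *head, *tail; };

    static struct bio_list reads, writes;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;

    static void bio_list_add(struct bio_list *bl, struct node *n)
    {
        n->next = NULL;
        if (bl->tail)
            bl->tail->next = n;
        else
            bl->head = n;
        bl->tail = n;
    }

    static void queue_bio(struct node *bio, bool is_write)
    {
        struct bio_list *bl = is_write ? &writes : &reads;

        pthread_mutex_lock(&lock);
        bool was_empty = (bl->head == NULL);
        bio_list_add(bl, bio);
        if (was_empty)                 /* empty -> nonempty: wake worker */
            pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        struct node n1, n2;

        queue_bio(&n1, false);  /* read: wakes the worker */
        queue_bio(&n2, false);  /* read: list already nonempty, no wake */
        return 0;
    }
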
177 static struct mirror *get_default_mirror(struct mirror_set *ms)
179 return &ms->mirror[atomic_read(&ms->default_mirror)];
184 struct mirror_set *ms = m->ms;
185 struct mirror *m0 = &(ms->mirror[0]);
187 atomic_set(&ms->default_mirror, m - m0);
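
Lines 177-187 keep the current default leg as an atomic array index; set_default_mirror() derives that index by pointer arithmetic against mirror[0]. Sketch with C11 atomics; the array size and names are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    struct mirror { int id; };

    #define NR 3
    static struct mirror mirrors[NR] = { { 0 }, { 1 }, { 2 } };
    static atomic_int default_mirror;        /* an index, not a pointer */

    static struct mirror *get_default_mirror(void)
    {
        return &mirrors[atomic_load(&default_mirror)];
    }

    static void set_default_mirror(struct mirror *m)
    {
        /* pointer difference against element 0 recovers the index */
        atomic_store(&default_mirror, (int)(m - &mirrors[0]));
    }

    int main(void)
    {
        set_default_mirror(&mirrors[2]);
        printf("default leg: %d\n", get_default_mirror()->id); /* 2 */
        return 0;
    }
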
190 static struct mirror *get_valid_mirror(struct mirror_set *ms)
194 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
217 struct mirror_set *ms = m->ms;
220 ms->leg_failure = 1;
232 if (!errors_handled(ms))
235 if (m != get_default_mirror(ms))
238 if (!ms->in_sync && !keep_log(ms)) {
248 new = get_valid_mirror(ms);
255 queue_work(dm_raid1_wq, &ms->trigger_event);
260 struct mirror_set *ms = ti->private;
270 .client = ms->io_client,
273 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
280 dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
282 for (i = 0; i < ms->nr_mirrors; i++)
284 fail_mirror(ms->mirror + i,
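
Lines 260-284 are mirror_flush(): one synchronous dm_io() across all legs, with the error_bits out-parameter reporting per-region failures, then a loop failing exactly the legs whose bit is set. A tiny sketch of that bitmask decode; NR and the helper names are illustrative:

    #include <stdio.h>

    #define NR 3

    static void fail_mirror(int leg)
    {
        printf("leg %d failed the flush\n", leg);
    }

    static void decode_error_bits(unsigned long error_bits)
    {
        for (int i = 0; i < NR; i++)
            if (error_bits & (1UL << i))   /* test_bit(i, &error_bits) */
                fail_mirror(i);
    }

    int main(void)
    {
        decode_error_bits(0x5UL);  /* legs 0 and 2 reported errors */
        return 0;
    }
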
305 struct mirror_set *ms = dm_rh_region_context(reg);
311 fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
321 for (m = 0; m < ms->nr_mirrors; m++) {
322 if (&ms->mirror[m] == get_default_mirror(ms))
325 fail_mirror(ms->mirror + m,
334 static void recover(struct mirror_set *ms, struct dm_region *reg)
341 sector_t region_size = dm_rh_get_region_size(ms->rh);
344 m = get_default_mirror(ms);
346 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
347 if (key == (ms->nr_regions - 1)) {
352 from.count = ms->ti->len & (region_size - 1);
359 for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
360 if (&ms->mirror[i] == get_default_mirror(ms))
363 m = ms->mirror + i;
365 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
371 if (!errors_handled(ms))
374 dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
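
In recover() (lines 334-374) the copy source is the default leg, the destinations are every other leg, and the final region may be shorter than region_size: because region_size is a power of two, ms->ti->len & (region_size - 1) is the remainder, and the source (in lines not matched here) falls back to a full region when that remainder is zero. A worked sketch of the arithmetic:

    #include <assert.h>

    typedef unsigned long long sector_t;

    static sector_t last_region_count(sector_t len, sector_t region_size)
    {
        /* power-of-two region_size: the mask is len % region_size */
        sector_t rem = len & (region_size - 1);

        return rem ? rem : region_size;  /* 0 means exactly full */
    }

    int main(void)
    {
        assert(last_region_count(1000, 128) == 104); /* 1000 % 128 */
        assert(last_region_count(1024, 128) == 128); /* full region */
        return 0;
    }
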
378 static void reset_ms_flags(struct mirror_set *ms)
382 ms->leg_failure = 0;
383 for (m = 0; m < ms->nr_mirrors; m++) {
384 atomic_set(&(ms->mirror[m].error_count), 0);
385 ms->mirror[m].error_type = 0;
389 static void do_recovery(struct mirror_set *ms)
392 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
397 dm_rh_recovery_prepare(ms->rh);
402 while ((reg = dm_rh_recovery_start(ms->rh)))
403 recover(ms, reg);
408 if (!ms->in_sync &&
409 (log->type->get_sync_count(log) == ms->nr_regions)) {
411 dm_table_event(ms->ti->table);
412 ms->in_sync = 1;
413 reset_ms_flags(ms);
422 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
424 struct mirror *m = get_default_mirror(ms);
430 if (m-- == ms->mirror)
431 m += ms->nr_mirrors;
432 } while (m != get_default_mirror(ms));
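
choose_mirror() (lines 422-432) scans backwards from the default leg, wrapping past element zero, and settles for the first leg with a zero error_count; one full lap without a healthy leg returns NULL. Userspace sketch reproducing the same wrap trick; error_count is a plain int here rather than an atomic:

    #include <stdio.h>

    struct mirror { int error_count; };

    #define NR 3
    static struct mirror mirrors[NR];
    static int default_idx;

    static struct mirror *choose_mirror(void)
    {
        struct mirror *start = &mirrors[default_idx];
        struct mirror *m = start;

        do {
            if (!m->error_count)
                return m;
            /* step back one leg, wrapping past element 0; the kernel
             * uses the same post-decrement trick */
            if (m-- == &mirrors[0])
                m += NR;
        } while (m != start);

        return NULL;  /* every leg has failed */
    }

    int main(void)
    {
        mirrors[0].error_count = 1;             /* default leg is bad */
        struct mirror *m = choose_mirror();
        printf("chose leg %td\n", m - mirrors); /* wraps to leg 2 */
        return 0;
    }
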
439 struct mirror *default_mirror = get_default_mirror(m->ms);
444 static int mirror_available(struct mirror_set *ms, struct bio *bio)
446 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
447 region_t region = dm_rh_bio_to_region(ms->rh, bio);
450 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
462 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
479 static void hold_bio(struct mirror_set *ms, struct bio *bio)
485 spin_lock_irq(&ms->lock);
487 if (atomic_read(&ms->suspend)) {
488 spin_unlock_irq(&ms->lock);
493 if (dm_noflush_suspending(ms->ti))
505 bio_list_add(&ms->holds, bio);
506 spin_unlock_irq(&ms->lock);
529 if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
532 queue_bio(m->ms, bio, bio_data_dir(bio));
551 .client = m->ms->io_client,
559 static inline int region_in_sync(struct mirror_set *ms, region_t region,
562 int state = dm_rh_get_state(ms->rh, region, may_block);
566 static void do_reads(struct mirror_set *ms, struct bio_list *reads)
573 region = dm_rh_bio_to_region(ms->rh, bio);
574 m = get_default_mirror(ms);
579 if (likely(region_in_sync(ms, region, 1)))
580 m = choose_mirror(ms, bio->bi_iter.bi_sector);
607 struct mirror_set *ms;
611 ms = bio_get_m(bio)->ms;
635 for (i = 0; i < ms->nr_mirrors; i++)
637 fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
644 spin_lock_irqsave(&ms->lock, flags);
645 if (!ms->failures.head)
647 bio_list_add(&ms->failures, bio);
649 wakeup_mirrord(ms);
650 spin_unlock_irqrestore(&ms->lock, flags);
653 static void do_write(struct mirror_set *ms, struct bio *bio)
665 .client = ms->io_client,
674 for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
681 bio_set_m(bio, get_default_mirror(ms));
683 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
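
do_write() (lines 653-683) fires one asynchronous dm_io() at every leg, stashing the default mirror in the bio first (line 681) so write_callback() can recover it via bio_get_m() (line 611). A minimal analogue of round-tripping a context pointer through an async completion; struct request and the helpers are invented stand-ins for the bio plumbing:

    #include <stdio.h>

    struct mirror { int id; };
    struct request { void *ctx; };  /* stand-in for the bio's spare slot */

    static void set_ctx(struct request *rq, struct mirror *m)  /* bio_set_m */
    {
        rq->ctx = m;
    }

    static struct mirror *get_ctx(struct request *rq)          /* bio_get_m */
    {
        return rq->ctx;
    }

    static void completion(struct request *rq)  /* write_callback analogue */
    {
        struct mirror *m = get_ctx(rq);

        printf("completed, default leg was %d\n", m->id);
    }

    int main(void)
    {
        struct mirror def = { .id = 0 };
        struct request rq;

        set_ctx(&rq, &def);  /* before issuing the async write */
        completion(&rq);     /* later, from the completion path */
        return 0;
    }
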
686 static void do_writes(struct mirror_set *ms, struct bio_list *writes)
692 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
713 region = dm_rh_bio_to_region(ms->rh, bio);
721 state = dm_rh_get_state(ms->rh, region, 1);
745 spin_lock_irq(&ms->lock);
746 bio_list_merge(&ms->writes, &requeue);
747 spin_unlock_irq(&ms->lock);
748 delayed_wake(ms);
756 dm_rh_inc_pending(ms->rh, &sync);
757 dm_rh_inc_pending(ms->rh, &nosync);
764 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
769 if (unlikely(ms->log_failure) && errors_handled(ms)) {
770 spin_lock_irq(&ms->lock);
771 bio_list_merge(&ms->failures, &sync);
772 spin_unlock_irq(&ms->lock);
773 wakeup_mirrord(ms);
776 do_write(ms, bio);
779 dm_rh_delay(ms->rh, bio);
782 if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
783 spin_lock_irq(&ms->lock);
784 bio_list_add(&ms->failures, bio);
785 spin_unlock_irq(&ms->lock);
786 wakeup_mirrord(ms);
788 map_bio(get_default_mirror(ms), bio);
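
do_writes() (lines 686-788) triages each write by its region's dirty-log state: in-sync regions take the mirrored path via do_write() (line 776), regions under recovery are parked with dm_rh_delay() (line 779), and out-of-sync regions go to the default leg only (line 788). Sketch of the triage, with invented RH_* names standing in for dm-region-hash's DM_RH_* states:

    enum region_state { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };

    struct triage { int sync, nosync, delayed; };

    static void classify(enum region_state st, struct triage *t)
    {
        switch (st) {
        case RH_CLEAN:
        case RH_DIRTY:
            t->sync++;     /* in sync: do_write() to every leg */
            break;
        case RH_NOSYNC:
            t->nosync++;   /* out of sync: default leg only */
            break;
        case RH_RECOVERING:
            t->delayed++;  /* recovery in flight: dm_rh_delay() */
            break;
        }
    }

    int main(void)
    {
        struct triage t = { 0 };

        classify(RH_RECOVERING, &t);  /* this write must wait */
        return t.delayed == 1 ? 0 : 1;
    }
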
794 static void do_failures(struct mirror_set *ms, struct bio_list *failures)
819 if (!ms->log_failure) {
820 ms->in_sync = 0;
821 dm_rh_mark_nosync(ms->rh, bio);
837 if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
839 else if (errors_handled(ms) && !keep_log(ms))
840 hold_bio(ms, bio);
848 struct mirror_set *ms =
851 dm_table_event(ms->ti->table);
861 struct mirror_set *ms = container_of(work, struct mirror_set,
866 spin_lock_irqsave(&ms->lock, flags);
867 reads = ms->reads;
868 writes = ms->writes;
869 failures = ms->failures;
870 bio_list_init(&ms->reads);
871 bio_list_init(&ms->writes);
872 bio_list_init(&ms->failures);
873 spin_unlock_irqrestore(&ms->lock, flags);
875 dm_rh_update_states(ms->rh, errors_handled(ms));
876 do_recovery(ms);
877 do_reads(ms, &reads);
878 do_writes(ms, &writes);
879 do_failures(ms, &failures);
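
The kmirrord worker (lines 861-879) steals all three bio lists in one critical section, reinitializes them, and only then does the actual work with ms->lock dropped. Userspace sketch of snapshot-and-process, assuming pthreads; the list layout is illustrative:

    #include <pthread.h>

    struct node { struct node *next; };
    struct list { struct node *head, *tail; };

    static struct list reads, writes, failures;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void process(struct list *l, const char *what)
    {
        (void)l; (void)what;  /* do_reads()/do_writes()/do_failures() */
    }

    static void do_work(void)
    {
        struct list r, w, f;

        pthread_mutex_lock(&lock);
        r = reads;    reads    = (struct list){ 0 };  /* steal + reinit */
        w = writes;   writes   = (struct list){ 0 };
        f = failures; failures = (struct list){ 0 };
        pthread_mutex_unlock(&lock);

        /* all the heavy lifting happens with the lock dropped */
        process(&r, "reads");
        process(&w, "writes");
        process(&f, "failures");
    }

    int main(void)
    {
        do_work();
        return 0;
    }
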
892 struct mirror_set *ms =
893 kzalloc_flex(*ms, mirror, nr_mirrors);
895 if (!ms) {
900 spin_lock_init(&ms->lock);
901 bio_list_init(&ms->reads);
902 bio_list_init(&ms->writes);
903 bio_list_init(&ms->failures);
904 bio_list_init(&ms->holds);
906 ms->ti = ti;
907 ms->nr_mirrors = nr_mirrors;
908 ms->nr_regions = dm_sector_div_up(ti->len, region_size);
909 ms->in_sync = 0;
910 ms->log_failure = 0;
911 ms->leg_failure = 0;
912 atomic_set(&ms->suspend, 0);
913 atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
915 ms->io_client = dm_io_client_create();
916 if (IS_ERR(ms->io_client)) {
918 kfree(ms);
922 ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
924 ms->ti->begin, MAX_RECOVERY,
925 dl, region_size, ms->nr_regions);
926 if (IS_ERR(ms->rh)) {
928 dm_io_client_destroy(ms->io_client);
929 kfree(ms);
933 return ms;
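
alloc_context() (lines 892-933) makes a single zeroed allocation sized for struct mirror_set plus its trailing mirror[] flexible array; upstream this was historically spelled kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL), which also overflow-checks the size math. Userspace analogue with a flexible array member:

    #include <stdio.h>
    #include <stdlib.h>

    struct mirror { long offset; };

    struct mirror_set {
        unsigned int nr_mirrors;
        struct mirror mirror[];  /* flexible array member, sized at alloc */
    };

    static struct mirror_set *alloc_context(unsigned int nr_mirrors)
    {
        /* struct_size() would additionally overflow-check this math */
        struct mirror_set *ms =
            calloc(1, sizeof(*ms) + nr_mirrors * sizeof(ms->mirror[0]));

        if (ms)
            ms->nr_mirrors = nr_mirrors;
        return ms;
    }

    int main(void)
    {
        struct mirror_set *ms = alloc_context(2);

        if (!ms)
            return 1;
        printf("%u legs, zero-initialized\n", ms->nr_mirrors);
        free(ms);
        return 0;
    }
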
936 static void free_context(struct mirror_set *ms, struct dm_target *ti,
940 dm_put_device(ti, ms->mirror[m].dev);
942 dm_io_client_destroy(ms->io_client);
943 dm_region_hash_destroy(ms->rh);
944 kfree(ms);
947 static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
961 &ms->mirror[mirror].dev);
967 ms->mirror[mirror].ms = ms;
968 atomic_set(&(ms->mirror[mirror].error_count), 0);
969 ms->mirror[mirror].error_type = 0;
970 ms->mirror[mirror].offset = offset;
1013 static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
1017 struct dm_target *ti = ms->ti;
1042 ms->features |= DM_RAID1_HANDLE_ERRORS;
1044 ms->features |= DM_RAID1_KEEP_LOG;
1054 if (!errors_handled(ms) && keep_log(ms)) {
1078 struct mirror_set *ms;
1104 ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
1105 if (!ms) {
1112 r = get_mirror(ms, ti, m, argv);
1114 free_context(ms, ti, m);
1121 ti->private = ms;
1123 r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
1131 ms->kmirrord_wq = alloc_workqueue("kmirrord",
1133 if (!ms->kmirrord_wq) {
1138 INIT_WORK(&ms->kmirrord_work, do_mirror);
1139 timer_setup(&ms->timer, delayed_wake_fn, 0);
1140 ms->timer_pending = 0;
1141 INIT_WORK(&ms->trigger_event, trigger_event);
1143 r = parse_features(ms, argc, argv, &args_used);
1165 ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1166 if (IS_ERR(ms->kcopyd_client)) {
1167 r = PTR_ERR(ms->kcopyd_client);
1171 wakeup_mirrord(ms);
1175 destroy_workqueue(ms->kmirrord_wq);
1177 free_context(ms, ti, ms->nr_mirrors);
1183 struct mirror_set *ms = ti->private;
1185 timer_delete_sync(&ms->timer);
1186 flush_workqueue(ms->kmirrord_wq);
1187 flush_work(&ms->trigger_event);
1188 dm_kcopyd_client_destroy(ms->kcopyd_client);
1189 destroy_workqueue(ms->kmirrord_wq);
1190 free_context(ms, ti, ms->nr_mirrors);
1200 struct mirror_set *ms = ti->private;
1201 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1209 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
1210 queue_bio(ms, bio, rw);
1214 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
1225 queue_bio(ms, bio, rw);
1233 m = choose_mirror(ms, bio->bi_iter.bi_sector);
1249 struct mirror_set *ms = ti->private;
1261 dm_rh_dec(ms->rh, bio_record->write_region);
1293 if (default_ok(m) || mirror_available(ms, bio)) {
1300 queue_bio(ms, bio, rw);
1314 struct mirror_set *ms = ti->private;
1315 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1320 atomic_set(&ms->suspend, 1);
1325 * a chance to be added in the hold list because ms->suspend
1328 spin_lock_irq(&ms->lock);
1329 holds = ms->holds;
1330 bio_list_init(&ms->holds);
1331 spin_unlock_irq(&ms->lock);
1334 hold_bio(ms, bio);
1340 dm_rh_stop_recovery(ms->rh);
1343 !dm_rh_recovery_in_flight(ms->rh));
1355 flush_workqueue(ms->kmirrord_wq);
1360 struct mirror_set *ms = ti->private;
1361 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1370 struct mirror_set *ms = ti->private;
1371 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1373 atomic_set(&ms->suspend, 0);
1377 dm_rh_start_recovery(ms->rh);
1410 struct mirror_set *ms = ti->private;
1411 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1416 DMEMIT("%d ", ms->nr_mirrors);
1417 for (m = 0; m < ms->nr_mirrors; m++) {
1418 DMEMIT("%s ", ms->mirror[m].dev->name);
1419 buffer[m] = device_status_char(&(ms->mirror[m]));
1425 (unsigned long long)ms->nr_regions, buffer);
1434 DMEMIT("%d", ms->nr_mirrors);
1435 for (m = 0; m < ms->nr_mirrors; m++)
1436 DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1437 (unsigned long long)ms->mirror[m].offset);
1439 num_feature_args += !!errors_handled(ms);
1440 num_feature_args += !!keep_log(ms);
1443 if (errors_handled(ms))
1445 if (keep_log(ms))
1453 DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
1454 for (m = 0; m < ms->nr_mirrors; m++) {
1455 DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
1457 m, device_status_char(&(ms->mirror[m])));
1460 DMEMIT(",handle_errors=%c", errors_handled(ms) ? 'y' : 'n');
1461 DMEMIT(",keep_log=%c", keep_log(ms) ? 'y' : 'n');
1473 struct mirror_set *ms = ti->private;
1477 for (i = 0; !ret && i < ms->nr_mirrors; i++)
1478 ret = fn(ti, ms->mirror[i].dev,
1479 ms->mirror[i].offset, ti->len, data);
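
mirror_iterate_devices() (lines 1473-1479) walks the legs with a caller-supplied callback and stops at the first nonzero return, the usual device-mapper iterate_devices contract. Generic sketch of early-exit callback iteration; the types are illustrative:

    #include <stdio.h>

    struct mirror { const char *name; };

    typedef int (*iterate_fn)(struct mirror *m, void *data);

    static int iterate_devices(struct mirror *legs, int nr,
                               iterate_fn fn, void *data)
    {
        int ret = 0;

        for (int i = 0; !ret && i < nr; i++)
            ret = fn(&legs[i], data);  /* first nonzero return wins */
        return ret;
    }

    static int print_leg(struct mirror *m, void *data)
    {
        (void)data;
        printf("%s\n", m->name);
        return 0;
    }

    int main(void)
    {
        struct mirror legs[] = { { "/dev/sda" }, { "/dev/sdb" } };

        return iterate_devices(legs, 2, print_leg, NULL);
    }
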