/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2014 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"

static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10	/* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.  The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Flags for rs->print_flags field.
 */
#define DMPF_SYNC              0x1
#define DMPF_NOSYNC            0x2
#define DMPF_REBUILD           0x4
#define DMPF_DAEMON_SLEEP      0x8
#define DMPF_MIN_RECOVERY_RATE 0x10
#define DMPF_MAX_RECOVERY_RATE 0x20
#define DMPF_MAX_WRITE_BEHIND  0x40
#define DMPF_STRIPE_CACHE      0x80
#define DMPF_REGION_SIZE       0x100
#define DMPF_RAID10_COPIES     0x200
#define DMPF_RAID10_FORMAT     0x400

struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t print_flags;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid1",    "RAID1 (mirroring)",             0, 2, 1,  0 /* NONE */},
	{"raid10",   "RAID10 (striped mirrors)",      0, 2, 10, UINT_MAX /* Varies */},
	{"raid4",    "RAID4 (dedicated parity disk)", 1, 2, 5,  ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",       1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",      1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",        1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",       1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",          2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",             2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",            2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE}
};

static char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bits 16 and 17 stand for "offset" and "use_far_sets".
	 * Refer to MD's raid10.c for details
	 */
	if ((layout & 0x10000) && (layout & 0x20000))
		return "offset";

	if ((layout & 0xFF) > 1)
		return "near";

	return "far";
}

static unsigned raid10_md_layout_to_copies(int layout)
{
	if ((layout & 0xFF) > 1)
		return layout & 0xFF;
	return (layout >> 8) & 0xFF;
}

static int raid10_format_to_md_layout(char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcmp("offset", format))
		return 0x30000 | (f << 8) | n;

	if (!strcmp("far", format))
		return 0x20000 | (f << 8) | n;

	return (f << 8) | n;
}
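
/*
 * Worked examples of the layout encoding handled by the helpers above.
 * The values follow directly from the code; they are illustration only,
 * not an additional on-disk format:
 *
 *   raid10_format_to_md_layout("near",   2) = 0x102   (n = 2, f = 1)
 *   raid10_format_to_md_layout("far",    2) = 0x20201 (use_far_sets)
 *   raid10_format_to_md_layout("offset", 2) = 0x30201 (offset + use_far_sets)
 *
 * raid10_md_layout_to_copies() returns 2 for each of these, and
 * raid10_md_layout_to_format() maps them back to the format names.
 */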

static struct raid_type *get_raid_type(char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;
}

static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
static int dev_parms(struct raid_set *rs, char **argv)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		if (strcmp(argv[0], "-")) {
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			rs->ti->error = "RAID metadata device lookup failure";
			if (ret)
				return ret;

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return -ENOMEM;
		}

		if (!strcmp(argv[1], "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			rs->ti->error = "No data device supplied with metadata device";
			if (rs->dev[i].meta_dev)
				return -EINVAL;

			continue;
		}

		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		if (ret) {
			rs->ti->error = "RAID device lookup failure";
			return ret;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* Round up to the next power of 2 to get whole regions */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}

/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned i, rebuild_cnt = 0;
	unsigned rebuilds_per_group = 0, copies, d;
	unsigned group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.layout);
		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g.    dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
			for (i = 0; i < rs->md.raid_disks * copies; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				d = i % rs->md.raid_disks;
				if ((!rs->dev[d].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}

/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [devices_handle_discard_safely]	Allow discards on RAID4/5/6; useful if RAID
 *					member device(s) properly support TRIM/UNMAP
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
static int parse_raid_params(struct raid_set *rs, char **argv,
			     unsigned num_raid_params)
{
	char *raid10_format = "near";
	unsigned raid10_copies = 2;
	unsigned i;
	unsigned long value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	char *key;

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (kstrtoul(argv[0], 10, &value) < 0) {
		rs->ti->error = "Bad chunk size";
		return -EINVAL;
	} else if (rs->raid_type->level == 1) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
	argv++;
	num_raid_params--;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		if (!strcasecmp(argv[i], "nosync")) {
			rs->md.recovery_cp = MaxSector;
			rs->print_flags |= DMPF_NOSYNC;
			continue;
		}
		if (!strcasecmp(argv[i], "sync")) {
			rs->md.recovery_cp = 0;
			rs->print_flags |= DMPF_SYNC;
			continue;
		}

		/* The rest of the optional arguments come in key/value pairs */
		if ((i + 1) >= num_raid_params) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		key = argv[i++];

		/* Parameters that take a string value are checked here. */
		if (!strcasecmp(key, "raid10_format")) {
			if (rs->raid_type->level != 10) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			if (strcmp("near", argv[i]) &&
			    strcmp("far", argv[i]) &&
			    strcmp("offset", argv[i])) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return -EINVAL;
			}
			raid10_format = argv[i];
			rs->print_flags |= DMPF_RAID10_FORMAT;
			continue;
		}

		if (kstrtoul(argv[i], 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		/* Parameters that take a numeric value are checked here */
		if (!strcasecmp(key, "rebuild")) {
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
			clear_bit(In_sync, &rs->dev[value].rdev.flags);
			rs->dev[value].rdev.recovery_offset = 0;
			rs->print_flags |= DMPF_REBUILD;
		} else if (!strcasecmp(key, "write_mostly")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid write_mostly drive index given";
				return -EINVAL;
			}
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
		} else if (!strcasecmp(key, "max_write_behind")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, "daemon_sleep")) {
			rs->print_flags |= DMPF_DAEMON_SLEEP;
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, "stripe_cache")) {
			rs->print_flags |= DMPF_STRIPE_CACHE;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if ((rs->raid_type->level != 5) &&
			    (rs->raid_type->level != 6)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}
			if (raid5_set_cache_size(&rs->md, (int)value)) {
				rs->ti->error = "Bad stripe_cache size";
				return -EINVAL;
			}
		} else if (!strcasecmp(key, "min_recovery_rate")) {
			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, "max_recovery_rate")) {
			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, "region_size")) {
			rs->print_flags |= DMPF_REGION_SIZE;
			region_size = value;
		} else if (!strcasecmp(key, "raid10_copies") &&
			   (rs->raid_type->level == 10)) {
			if ((value < 2) || (value > 0xFF)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_RAID10_COPIES;
			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameters";
			return -EINVAL;
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rs->raid_type->level == 10) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		/*
		 * If the format is not "near", we only support
		 * two copies at the moment.
		 */
		if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
			rs->ti->error = "Too many copies for given RAID10 format.";
			return -EINVAL;
		}

		/* (Len * #mirrors) / #devices */
		sectors_per_dev = rs->ti->len * raid10_copies;
		sector_div(sectors_per_dev, rs->md.raid_disks);

		rs->md.layout = raid10_format_to_md_layout(raid10_format,
							   raid10_copies);
		rs->md.new_layout = rs->md.layout;
	} else if ((rs->raid_type->level > 1) &&
		   sector_div(sectors_per_dev,
			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
		rs->ti->error = "Target length not divisible by number of data devices";
		return -EINVAL;
	}
	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	return 0;
}
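
/*
 * Worked example of the argument handling above (all names and figures
 * are hypothetical, chosen only to illustrate the counting rules):
 *
 *   0 976562432 raid raid5_ls 3 64 region_size 1024 \
 *       3 - 8:17 - 8:33 - 8:49
 *
 * <#raid_params> is 3, covering "64" (chunk_size, in sectors) plus the
 * "region_size 1024" key/value pair; <#raid_devs> is 3, followed by one
 * <meta_dev> <data_dev> word pair per device ('-' = no metadata device).
 * Had region_size been omitted, validate_region_size() would pick a
 * default: with ti->len = 976562432 sectors, min_region_size is
 * 976562432 / 2^21 = 465 sectors, which is under 2^13, so the 4MiB
 * default (8192 sectors) is used.
 */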

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	if (rs->raid_type->level == 1)
		return md_raid1_congested(&rs->md, bits);

	if (rs->raid_type->level == 10)
		return md_raid10_congested(&rs->md, bits);

	return md_raid5_congested(&rs->md, bits);
}

/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	__u8 pad[452];		/* Round struct to 512 bytes. */
				/* Always set to 0 when writing. */
} __packed;
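
/*
 * On-disk layout notes (derived from the code below, not extra fields):
 * the superblock occupies the first 512 bytes of the metadata device
 * (super_load() sets sb_start = 0 and sb_size = sizeof(*sb)), and the
 * write-intent bitmap follows 4KiB in (super_validate() sets
 * bitmap_info.offset = 4096 >> 9 sectors).  Freshness is decided by the
 * 'events' counter alone: e.g. a device reporting events = 42 wins over
 * a reference at events = 40; on a tie the current reference is kept.
 */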

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		return -EINVAL;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	int i;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	for (i = 0; i < mddev->raid_disks; i++)
		if (!rs->dev[i].data_dev ||
		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
			failed_devices |= (1ULL << i);

	memset(sb, 0, sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);	/* No features yet */

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}

/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int ret;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_start = 0;
	rdev->sb_size = sizeof(*sb);

	ret = read_disk_sb(rdev, rdev->sb_size);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}

static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
	int role;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct md_rdev *r;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;

	/*
	 * Reshaping is not currently allowed
	 */
	if (le32_to_cpu(sb->level) != mddev->level) {
		DMERR("Reshaping arrays not yet supported. (RAID level change)");
		return -EINVAL;
	}
	if (le32_to_cpu(sb->layout) != mddev->layout) {
		DMERR("Reshaping arrays not yet supported. (RAID layout change)");
		DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
		DMERR(" Old layout: %s w/ %d copies",
		      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
		      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
		DMERR(" New layout: %s w/ %d copies",
		      raid10_md_layout_to_format(mddev->layout),
		      raid10_md_layout_to_copies(mddev->layout));
		return -EINVAL;
	}
	if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
		DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
		return -EINVAL;
	}

	/* We can only change the number of devices in RAID1 right now */
	if ((rs->raid_type->level != 1) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported. (device count change)");
		return -EINVAL;
	}

	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set.  Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: "
			       "Clearing superblock", r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected "
			      "into existing array without 'rebuild' "
			      "parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be "
		      "injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}

	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (rs->raid_type->level != 1) {
					rs->ti->error = "Cannot change device "
							"positions in RAID array";
					return -EINVAL;
				}
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1ULL << role))
				set_bit(Faulty, &r->flags);
		}
	}

	return 0;
}

static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

	mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
	rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);

	return 0;
}

/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
	int ret;
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
		/*
		 * Skipping super_load due to DMPF_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * When reshaping capability is added, we must ensure
		 * that the "sync" directive is disallowed during the
		 * reshape.
		 */
		if (rs->print_flags & DMPF_SYNC)
			continue;

		if (!rdev->meta_bdev)
			continue;

		ret = super_load(rdev, freshest);

		switch (ret) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (dev->meta_dev)
				dm_put_device(ti, dev->meta_dev);

			dev->meta_dev = NULL;
			rdev->meta_bdev = NULL;

			if (rdev->sb_page)
				put_page(rdev->sb_page);

			rdev->sb_page = NULL;

			rdev->sb_loaded = 0;

			/*
			 * We might be able to salvage the data device
			 * even though the meta device has failed.  For
			 * now, we behave as though '- -' had been
			 * set for this device in the table.
			 */
			if (dev->data_dev)
				dm_put_device(ti, dev->data_dev);

			dev->data_dev = NULL;
			rdev->bdev = NULL;

			list_del(&rdev->same_set);
		}
	}

	if (!freshest)
		return 0;

	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(mddev, freshest))
		return -EINVAL;

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(mddev, rdev))
			return -EINVAL;

	return 0;
}

/*
 * Enable/disable discard support on RAID set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
{
	int i;
	bool raid456;

	/* Assume discards not supported until after checks below. */
	ti->discards_supported = false;

	/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
	raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);

	for (i = 0; i < rs->md.raid_disks; i++) {
		struct request_queue *q = bdev_get_queue(rs->dev[i].rdev.bdev);

		if (!q || !blk_queue_discard(q))
			return;

		if (raid456) {
			if (!q->limits.discard_zeroes_data)
				return;
			if (!devices_handle_discard_safely) {
				DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
				DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
				return;
			}
		}
	}

	/* All RAID members properly support discards */
	ti->discards_supported = true;

	/*
	 * RAID1 and RAID10 personalities require bio splitting,
	 * RAID0/4/5/6 don't and process large discard bios properly.
	 */
	ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
	ti->num_discard_bios = 1;
}

/*
 * Construct a RAID4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>		\
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int ret;
	struct raid_type *rt;
	unsigned long num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;

	/* Must have at least <raid_type> <#raid_params> */
	if (argc < 2) {
		ti->error = "Too few arguments";
		return -EINVAL;
	}

	/* raid type */
	rt = get_raid_type(argv[0]);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* number of RAID parameters */
	if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
		ti->error = "Cannot understand number of RAID parameters";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* Skip over RAID params for now and find out # of devices */
	if (num_raid_params + 1 > argc) {
		ti->error = "Arguments do not agree with counts given";
		return -EINVAL;
	}

	if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
	    (num_raid_devs >= INT_MAX)) {
		ti->error = "Cannot understand number of raid devices";
		return -EINVAL;
	}

	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
	if (ret)
		goto bad;

	ret = -EINVAL;

	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
	argv += num_raid_params + 1;

	if (argc != (num_raid_devs * 2)) {
		ti->error = "Number of supplied RAID devices does not match the count given";
		goto bad;
	}

	ret = dev_parms(rs, argv);
	if (ret)
		goto bad;

	rs->md.sync_super = super_sync;
	ret = analyse_superblocks(ti, rs);
	if (ret)
		goto bad;

	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	/*
	 * Disable/enable discard support on RAID set.
	 */
	configure_discard_support(ti, rs);

	mutex_lock(&rs->md.reconfig_mutex);
	ret = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	mutex_unlock(&rs->md.reconfig_mutex);

	if (ret) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	if (ti->len != rs->md.array_sectors) {
		ti->error = "Array size does not match requested target length";
		ret = -EINVAL;
		goto size_mismatch;
	}
	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);
	return 0;

size_mismatch:
	md_stop(&rs->md);
bad:
	context_free(rs);

	return ret;
}

static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);
	md_stop(&rs->md);
	context_free(rs);
}

static int raid_map(struct dm_target *ti, struct bio *bio)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
}

static const char *decipher_sync_action(struct mddev *mddev)
{
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return "frozen";

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			return "reshape";

		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				return "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				return "check";
			return "repair";
		}

		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			return "recover";
	}

	return "idle";
}

static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned sz = 0;
	int i, array_in_sync = 0;
	sector_t sync;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
			sync = rs->md.curr_resync_completed;
		else
			sync = rs->md.recovery_cp;

		if (sync >= rs->md.resync_max_sectors) {
			/*
			 * Sync complete.
			 */
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		} else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
			/*
			 * If "check" or "repair" is occurring, the array has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
			array_in_sync = 1;
		} else {
			/*
			 * The array may be doing an initial sync, or it may
			 * be rebuilding individual components.  If all the
			 * devices are In_sync, then it is the array that is
			 * being initialized.
			 */
			for (i = 0; i < rs->md.raid_disks; i++)
				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
					array_in_sync = 1;
		}

		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * In-sync ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the array
		 *   - Rebuilding a subset of devices of the array
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);

		/*
		 * Sync action:
		 *   See Documentation/device-mapper/dm-raid.txt for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", decipher_sync_action(&rs->md));

		/*
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the array.
		 */
		DMEMIT(" %llu",
		       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
		       (unsigned long long)
		       atomic64_read(&rs->md.resync_mismatches));
		break;
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);

		if ((rs->print_flags & DMPF_SYNC) &&
		    (rs->md.recovery_cp == MaxSector))
			DMEMIT(" sync");
		if (rs->print_flags & DMPF_NOSYNC)
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);

		if (rs->print_flags & DMPF_DAEMON_SLEEP)
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);

		if (rs->print_flags & DMPF_STRIPE_CACHE) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}

		if (rs->print_flags & DMPF_REGION_SIZE)
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);

		if (rs->print_flags & DMPF_RAID10_COPIES)
			DMEMIT(" raid10_copies %u",
			       raid10_md_layout_to_copies(rs->md.layout));

		if (rs->print_flags & DMPF_RAID10_FORMAT)
			DMEMIT(" raid10_format %s",
			       raid10_md_layout_to_format(rs->md.layout));

		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
			else
				DMEMIT(" -");
		}
	}
}

static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!strcasecmp(argv[0], "reshape")) {
		DMERR("Reshape not supported.");
		return -EINVAL;
	}

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (!strcasecmp(argv[0], "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else {
		if (!strcasecmp(argv[0], "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!!strcasecmp(argv[0], "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended)
		md_wakeup_thread(mddev->thread);

	return 0;
}
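
/*
 * Usage illustration (hypothetical device name; the commands are issued
 * via dmsetup rather than from this file):
 *
 *   dmsetup message my_raid 0 check   # start a scrub, counts mismatches
 *   dmsetup message my_raid 0 idle    # interrupt a running sync thread
 *
 * While the scrub runs, raid_status() above would report something like
 *
 *   raid5_ls 3 AAA 12345/976562432 check 0
 *
 * i.e. raid type, device count, health characters, in-sync ratio,
 * sync action and mismatch count.
 */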

static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int ret = 0;

	for (i = 0; !ret && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			ret = fn(ti,
				 rs->dev[i].data_dev,
				 0, /* No offset on data devs */
				 rs->md.dev_sectors,
				 data);

	return ret;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}

static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
}

static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t failed_devices, cleared_failed_devices = 0;
	unsigned long flags;
	struct dm_raid_superblock *sb;
	struct md_rdev *r;

	for (i = 0; i < rs->md.raid_disks; i++) {
		r = &rs->dev[i].rdev;
		if (test_bit(Faulty, &r->flags) && r->sb_page &&
		    sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       " Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').  If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			if ((r->raid_disk >= 0) &&
			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
				/* Failed to revive this device, try next */
				continue;

			r->raid_disk = i;
			r->saved_raid_disk = i;
			flags = r->flags;
			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);
			clear_bit(In_sync, &r->flags);
			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
				r->raid_disk = -1;
				r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				r->recovery_offset = 0;
				cleared_failed_devices |= 1ULL << i;
			}
		}
	}
	if (cleared_failed_devices) {
		rdev_for_each(r, &rs->md) {
			sb = page_address(r->sb_page);
			failed_devices = le64_to_cpu(sb->failed_devices);
			failed_devices &= ~cleared_failed_devices;
			sb->failed_devices = cpu_to_le64(failed_devices);
		}
	}
}

static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
	if (!rs->bitmap_loaded) {
		bitmap_load(&rs->md);
		rs->bitmap_loaded = 1;
	} else {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	}

	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	mddev_resume(&rs->md);
}

static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");