1 /* 2 * Copyright (C) 2010-2011 Neil Brown 3 * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved. 4 * 5 * This file is released under the GPL. 6 */ 7 8 #include <linux/slab.h> 9 #include <linux/module.h> 10 11 #include "md.h" 12 #include "raid1.h" 13 #include "raid5.h" 14 #include "raid10.h" 15 #include "bitmap.h" 16 17 #include <linux/device-mapper.h> 18 19 #define DM_MSG_PREFIX "raid" 20 21 /* 22 * The following flags are used by dm-raid.c to set up the array state. 23 * They must be cleared before md_run is called. 24 */ 25 #define FirstUse 10 /* rdev flag */ 26 27 struct raid_dev { 28 /* 29 * Two DM devices, one to hold metadata and one to hold the 30 * actual data/parity. The reason for this is to not confuse 31 * ti->len and give more flexibility in altering size and 32 * characteristics. 33 * 34 * While it is possible for this device to be associated 35 * with a different physical device than the data_dev, it 36 * is intended for it to be the same. 37 * |--------- Physical Device ---------| 38 * |- meta_dev -|------ data_dev ------| 39 */ 40 struct dm_dev *meta_dev; 41 struct dm_dev *data_dev; 42 struct md_rdev rdev; 43 }; 44 45 /* 46 * Flags for rs->print_flags field. 47 */ 48 #define DMPF_SYNC 0x1 49 #define DMPF_NOSYNC 0x2 50 #define DMPF_REBUILD 0x4 51 #define DMPF_DAEMON_SLEEP 0x8 52 #define DMPF_MIN_RECOVERY_RATE 0x10 53 #define DMPF_MAX_RECOVERY_RATE 0x20 54 #define DMPF_MAX_WRITE_BEHIND 0x40 55 #define DMPF_STRIPE_CACHE 0x80 56 #define DMPF_REGION_SIZE 0x100 57 #define DMPF_RAID10_COPIES 0x200 58 #define DMPF_RAID10_FORMAT 0x400 59 60 struct raid_set { 61 struct dm_target *ti; 62 63 uint32_t bitmap_loaded; 64 uint32_t print_flags; 65 66 struct mddev md; 67 struct raid_type *raid_type; 68 struct dm_target_callbacks callbacks; 69 70 struct raid_dev dev[0]; 71 }; 72 73 /* Supported raid types and properties. */ 74 static struct raid_type { 75 const char *name; /* RAID algorithm. */ 76 const char *descr; /* Descriptor text for logging. */ 77 const unsigned parity_devs; /* # of parity devices. */ 78 const unsigned minimal_devs; /* minimal # of devices in set. */ 79 const unsigned level; /* RAID level. */ 80 const unsigned algorithm; /* RAID algorithm. */ 81 } raid_types[] = { 82 {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */}, 83 {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */}, 84 {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, 85 {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, 86 {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, 87 {"raid5_ls", "RAID5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC}, 88 {"raid5_rs", "RAID5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC}, 89 {"raid6_zr", "RAID6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART}, 90 {"raid6_nr", "RAID6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART}, 91 {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE} 92 }; 93 94 static unsigned raid10_md_layout_to_copies(int layout) 95 { 96 return layout & 0xFF; 97 } 98 99 static int raid10_format_to_md_layout(char *format, unsigned copies) 100 { 101 /* 1 "far" copy, and 'copies' "near" copies */ 102 return (1 << 8) | (copies & 0xFF); 103 } 104 105 static struct raid_type *get_raid_type(char *name) 106 { 107 int i; 108 109 for (i = 0; i < ARRAY_SIZE(raid_types); i++) 110 if (!strcmp(raid_types[i].name, name)) 111 return &raid_types[i]; 112 113 return NULL; 114 } 115 116 static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs) 117 { 118 unsigned i; 119 struct raid_set *rs; 120 121 if (raid_devs <= raid_type->parity_devs) { 122 ti->error = "Insufficient number of devices"; 123 return ERR_PTR(-EINVAL); 124 } 125 126 rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); 127 if (!rs) { 128 ti->error = "Cannot allocate raid context"; 129 return ERR_PTR(-ENOMEM); 130 } 131 132 mddev_init(&rs->md); 133 134 rs->ti = ti; 135 rs->raid_type = raid_type; 136 rs->md.raid_disks = raid_devs; 137 rs->md.level = raid_type->level; 138 rs->md.new_level = rs->md.level; 139 rs->md.layout = raid_type->algorithm; 140 rs->md.new_layout = rs->md.layout; 141 rs->md.delta_disks = 0; 142 rs->md.recovery_cp = 0; 143 144 for (i = 0; i < raid_devs; i++) 145 md_rdev_init(&rs->dev[i].rdev); 146 147 /* 148 * Remaining items to be initialized by further RAID params: 149 * rs->md.persistent 150 * rs->md.external 151 * rs->md.chunk_sectors 152 * rs->md.new_chunk_sectors 153 * rs->md.dev_sectors 154 */ 155 156 return rs; 157 } 158 159 static void context_free(struct raid_set *rs) 160 { 161 int i; 162 163 for (i = 0; i < rs->md.raid_disks; i++) { 164 if (rs->dev[i].meta_dev) 165 dm_put_device(rs->ti, rs->dev[i].meta_dev); 166 md_rdev_clear(&rs->dev[i].rdev); 167 if (rs->dev[i].data_dev) 168 dm_put_device(rs->ti, rs->dev[i].data_dev); 169 } 170 171 kfree(rs); 172 } 173 174 /* 175 * For every device we have two words 176 * <meta_dev>: meta device name or '-' if missing 177 * <data_dev>: data device name or '-' if missing 178 * 179 * The following are permitted: 180 * - - 181 * - <data_dev> 182 * <meta_dev> <data_dev> 183 * 184 * The following is not allowed: 185 * <meta_dev> - 186 * 187 * This code parses those words. If there is a failure, 188 * the caller must use context_free to unwind the operations. 189 */ 190 static int dev_parms(struct raid_set *rs, char **argv) 191 { 192 int i; 193 int rebuild = 0; 194 int metadata_available = 0; 195 int ret = 0; 196 197 for (i = 0; i < rs->md.raid_disks; i++, argv += 2) { 198 rs->dev[i].rdev.raid_disk = i; 199 200 rs->dev[i].meta_dev = NULL; 201 rs->dev[i].data_dev = NULL; 202 203 /* 204 * There are no offsets, since there is a separate device 205 * for data and metadata. 206 */ 207 rs->dev[i].rdev.data_offset = 0; 208 rs->dev[i].rdev.mddev = &rs->md; 209 210 if (strcmp(argv[0], "-")) { 211 ret = dm_get_device(rs->ti, argv[0], 212 dm_table_get_mode(rs->ti->table), 213 &rs->dev[i].meta_dev); 214 rs->ti->error = "RAID metadata device lookup failure"; 215 if (ret) 216 return ret; 217 218 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); 219 if (!rs->dev[i].rdev.sb_page) 220 return -ENOMEM; 221 } 222 223 if (!strcmp(argv[1], "-")) { 224 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && 225 (!rs->dev[i].rdev.recovery_offset)) { 226 rs->ti->error = "Drive designated for rebuild not specified"; 227 return -EINVAL; 228 } 229 230 rs->ti->error = "No data device supplied with metadata device"; 231 if (rs->dev[i].meta_dev) 232 return -EINVAL; 233 234 continue; 235 } 236 237 ret = dm_get_device(rs->ti, argv[1], 238 dm_table_get_mode(rs->ti->table), 239 &rs->dev[i].data_dev); 240 if (ret) { 241 rs->ti->error = "RAID device lookup failure"; 242 return ret; 243 } 244 245 if (rs->dev[i].meta_dev) { 246 metadata_available = 1; 247 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; 248 } 249 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; 250 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); 251 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) 252 rebuild++; 253 } 254 255 if (metadata_available) { 256 rs->md.external = 0; 257 rs->md.persistent = 1; 258 rs->md.major_version = 2; 259 } else if (rebuild && !rs->md.recovery_cp) { 260 /* 261 * Without metadata, we will not be able to tell if the array 262 * is in-sync or not - we must assume it is not. Therefore, 263 * it is impossible to rebuild a drive. 264 * 265 * Even if there is metadata, the on-disk information may 266 * indicate that the array is not in-sync and it will then 267 * fail at that time. 268 * 269 * User could specify 'nosync' option if desperate. 270 */ 271 DMERR("Unable to rebuild drive while array is not in-sync"); 272 rs->ti->error = "RAID device lookup failure"; 273 return -EINVAL; 274 } 275 276 return 0; 277 } 278 279 /* 280 * validate_region_size 281 * @rs 282 * @region_size: region size in sectors. If 0, pick a size (4MiB default). 283 * 284 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size'). 285 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap. 286 * 287 * Returns: 0 on success, -EINVAL on failure. 288 */ 289 static int validate_region_size(struct raid_set *rs, unsigned long region_size) 290 { 291 unsigned long min_region_size = rs->ti->len / (1 << 21); 292 293 if (!region_size) { 294 /* 295 * Choose a reasonable default. All figures in sectors. 296 */ 297 if (min_region_size > (1 << 13)) { 298 /* If not a power of 2, make it the next power of 2 */ 299 if (min_region_size & (min_region_size - 1)) 300 region_size = 1 << fls(region_size); 301 DMINFO("Choosing default region size of %lu sectors", 302 region_size); 303 } else { 304 DMINFO("Choosing default region size of 4MiB"); 305 region_size = 1 << 13; /* sectors */ 306 } 307 } else { 308 /* 309 * Validate user-supplied value. 310 */ 311 if (region_size > rs->ti->len) { 312 rs->ti->error = "Supplied region size is too large"; 313 return -EINVAL; 314 } 315 316 if (region_size < min_region_size) { 317 DMERR("Supplied region_size (%lu sectors) below minimum (%lu)", 318 region_size, min_region_size); 319 rs->ti->error = "Supplied region size is too small"; 320 return -EINVAL; 321 } 322 323 if (!is_power_of_2(region_size)) { 324 rs->ti->error = "Region size is not a power of 2"; 325 return -EINVAL; 326 } 327 328 if (region_size < rs->md.chunk_sectors) { 329 rs->ti->error = "Region size is smaller than the chunk size"; 330 return -EINVAL; 331 } 332 } 333 334 /* 335 * Convert sectors to bytes. 336 */ 337 rs->md.bitmap_info.chunksize = (region_size << 9); 338 339 return 0; 340 } 341 342 /* 343 * validate_raid_redundancy 344 * @rs 345 * 346 * Determine if there are enough devices in the array that haven't 347 * failed (or are being rebuilt) to form a usable array. 348 * 349 * Returns: 0 on success, -EINVAL on failure. 350 */ 351 static int validate_raid_redundancy(struct raid_set *rs) 352 { 353 unsigned i, rebuild_cnt = 0; 354 unsigned rebuilds_per_group, copies, d; 355 356 for (i = 0; i < rs->md.raid_disks; i++) 357 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || 358 !rs->dev[i].rdev.sb_page) 359 rebuild_cnt++; 360 361 switch (rs->raid_type->level) { 362 case 1: 363 if (rebuild_cnt >= rs->md.raid_disks) 364 goto too_many; 365 break; 366 case 4: 367 case 5: 368 case 6: 369 if (rebuild_cnt > rs->raid_type->parity_devs) 370 goto too_many; 371 break; 372 case 10: 373 copies = raid10_md_layout_to_copies(rs->md.layout); 374 if (rebuild_cnt < copies) 375 break; 376 377 /* 378 * It is possible to have a higher rebuild count for RAID10, 379 * as long as the failed devices occur in different mirror 380 * groups (i.e. different stripes). 381 * 382 * Right now, we only allow for "near" copies. When other 383 * formats are added, we will have to check those too. 384 * 385 * When checking "near" format, make sure no adjacent devices 386 * have failed beyond what can be handled. In addition to the 387 * simple case where the number of devices is a multiple of the 388 * number of copies, we must also handle cases where the number 389 * of devices is not a multiple of the number of copies. 390 * E.g. dev1 dev2 dev3 dev4 dev5 391 * A A B B C 392 * C D D E E 393 */ 394 for (i = 0; i < rs->md.raid_disks * copies; i++) { 395 if (!(i % copies)) 396 rebuilds_per_group = 0; 397 d = i % rs->md.raid_disks; 398 if ((!rs->dev[d].rdev.sb_page || 399 !test_bit(In_sync, &rs->dev[d].rdev.flags)) && 400 (++rebuilds_per_group >= copies)) 401 goto too_many; 402 } 403 break; 404 default: 405 if (rebuild_cnt) 406 return -EINVAL; 407 } 408 409 return 0; 410 411 too_many: 412 return -EINVAL; 413 } 414 415 /* 416 * Possible arguments are... 417 * <chunk_size> [optional_args] 418 * 419 * Argument definitions 420 * <chunk_size> The number of sectors per disk that 421 * will form the "stripe" 422 * [[no]sync] Force or prevent recovery of the 423 * entire array 424 * [rebuild <idx>] Rebuild the drive indicated by the index 425 * [daemon_sleep <ms>] Time between bitmap daemon work to 426 * clear bits 427 * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization 428 * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization 429 * [write_mostly <idx>] Indicate a write mostly drive via index 430 * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) 431 * [stripe_cache <sectors>] Stripe cache size for higher RAIDs 432 * [region_size <sectors>] Defines granularity of bitmap 433 * 434 * RAID10-only options: 435 * [raid10_copies <# copies>] Number of copies. (Default: 2) 436 * [raid10_format <near>] Layout algorithm. (Default: near) 437 */ 438 static int parse_raid_params(struct raid_set *rs, char **argv, 439 unsigned num_raid_params) 440 { 441 char *raid10_format = "near"; 442 unsigned raid10_copies = 2; 443 unsigned i; 444 unsigned long value, region_size = 0; 445 sector_t sectors_per_dev = rs->ti->len; 446 sector_t max_io_len; 447 char *key; 448 449 /* 450 * First, parse the in-order required arguments 451 * "chunk_size" is the only argument of this type. 452 */ 453 if ((strict_strtoul(argv[0], 10, &value) < 0)) { 454 rs->ti->error = "Bad chunk size"; 455 return -EINVAL; 456 } else if (rs->raid_type->level == 1) { 457 if (value) 458 DMERR("Ignoring chunk size parameter for RAID 1"); 459 value = 0; 460 } else if (!is_power_of_2(value)) { 461 rs->ti->error = "Chunk size must be a power of 2"; 462 return -EINVAL; 463 } else if (value < 8) { 464 rs->ti->error = "Chunk size value is too small"; 465 return -EINVAL; 466 } 467 468 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; 469 argv++; 470 num_raid_params--; 471 472 /* 473 * We set each individual device as In_sync with a completed 474 * 'recovery_offset'. If there has been a device failure or 475 * replacement then one of the following cases applies: 476 * 477 * 1) User specifies 'rebuild'. 478 * - Device is reset when param is read. 479 * 2) A new device is supplied. 480 * - No matching superblock found, resets device. 481 * 3) Device failure was transient and returns on reload. 482 * - Failure noticed, resets device for bitmap replay. 483 * 4) Device hadn't completed recovery after previous failure. 484 * - Superblock is read and overrides recovery_offset. 485 * 486 * What is found in the superblocks of the devices is always 487 * authoritative, unless 'rebuild' or '[no]sync' was specified. 488 */ 489 for (i = 0; i < rs->md.raid_disks; i++) { 490 set_bit(In_sync, &rs->dev[i].rdev.flags); 491 rs->dev[i].rdev.recovery_offset = MaxSector; 492 } 493 494 /* 495 * Second, parse the unordered optional arguments 496 */ 497 for (i = 0; i < num_raid_params; i++) { 498 if (!strcasecmp(argv[i], "nosync")) { 499 rs->md.recovery_cp = MaxSector; 500 rs->print_flags |= DMPF_NOSYNC; 501 continue; 502 } 503 if (!strcasecmp(argv[i], "sync")) { 504 rs->md.recovery_cp = 0; 505 rs->print_flags |= DMPF_SYNC; 506 continue; 507 } 508 509 /* The rest of the optional arguments come in key/value pairs */ 510 if ((i + 1) >= num_raid_params) { 511 rs->ti->error = "Wrong number of raid parameters given"; 512 return -EINVAL; 513 } 514 515 key = argv[i++]; 516 517 /* Parameters that take a string value are checked here. */ 518 if (!strcasecmp(key, "raid10_format")) { 519 if (rs->raid_type->level != 10) { 520 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; 521 return -EINVAL; 522 } 523 if (strcmp("near", argv[i])) { 524 rs->ti->error = "Invalid 'raid10_format' value given"; 525 return -EINVAL; 526 } 527 raid10_format = argv[i]; 528 rs->print_flags |= DMPF_RAID10_FORMAT; 529 continue; 530 } 531 532 if (strict_strtoul(argv[i], 10, &value) < 0) { 533 rs->ti->error = "Bad numerical argument given in raid params"; 534 return -EINVAL; 535 } 536 537 /* Parameters that take a numeric value are checked here */ 538 if (!strcasecmp(key, "rebuild")) { 539 if (value >= rs->md.raid_disks) { 540 rs->ti->error = "Invalid rebuild index given"; 541 return -EINVAL; 542 } 543 clear_bit(In_sync, &rs->dev[value].rdev.flags); 544 rs->dev[value].rdev.recovery_offset = 0; 545 rs->print_flags |= DMPF_REBUILD; 546 } else if (!strcasecmp(key, "write_mostly")) { 547 if (rs->raid_type->level != 1) { 548 rs->ti->error = "write_mostly option is only valid for RAID1"; 549 return -EINVAL; 550 } 551 if (value >= rs->md.raid_disks) { 552 rs->ti->error = "Invalid write_mostly drive index given"; 553 return -EINVAL; 554 } 555 set_bit(WriteMostly, &rs->dev[value].rdev.flags); 556 } else if (!strcasecmp(key, "max_write_behind")) { 557 if (rs->raid_type->level != 1) { 558 rs->ti->error = "max_write_behind option is only valid for RAID1"; 559 return -EINVAL; 560 } 561 rs->print_flags |= DMPF_MAX_WRITE_BEHIND; 562 563 /* 564 * In device-mapper, we specify things in sectors, but 565 * MD records this value in kB 566 */ 567 value /= 2; 568 if (value > COUNTER_MAX) { 569 rs->ti->error = "Max write-behind limit out of range"; 570 return -EINVAL; 571 } 572 rs->md.bitmap_info.max_write_behind = value; 573 } else if (!strcasecmp(key, "daemon_sleep")) { 574 rs->print_flags |= DMPF_DAEMON_SLEEP; 575 if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { 576 rs->ti->error = "daemon sleep period out of range"; 577 return -EINVAL; 578 } 579 rs->md.bitmap_info.daemon_sleep = value; 580 } else if (!strcasecmp(key, "stripe_cache")) { 581 rs->print_flags |= DMPF_STRIPE_CACHE; 582 583 /* 584 * In device-mapper, we specify things in sectors, but 585 * MD records this value in kB 586 */ 587 value /= 2; 588 589 if ((rs->raid_type->level != 5) && 590 (rs->raid_type->level != 6)) { 591 rs->ti->error = "Inappropriate argument: stripe_cache"; 592 return -EINVAL; 593 } 594 if (raid5_set_cache_size(&rs->md, (int)value)) { 595 rs->ti->error = "Bad stripe_cache size"; 596 return -EINVAL; 597 } 598 } else if (!strcasecmp(key, "min_recovery_rate")) { 599 rs->print_flags |= DMPF_MIN_RECOVERY_RATE; 600 if (value > INT_MAX) { 601 rs->ti->error = "min_recovery_rate out of range"; 602 return -EINVAL; 603 } 604 rs->md.sync_speed_min = (int)value; 605 } else if (!strcasecmp(key, "max_recovery_rate")) { 606 rs->print_flags |= DMPF_MAX_RECOVERY_RATE; 607 if (value > INT_MAX) { 608 rs->ti->error = "max_recovery_rate out of range"; 609 return -EINVAL; 610 } 611 rs->md.sync_speed_max = (int)value; 612 } else if (!strcasecmp(key, "region_size")) { 613 rs->print_flags |= DMPF_REGION_SIZE; 614 region_size = value; 615 } else if (!strcasecmp(key, "raid10_copies") && 616 (rs->raid_type->level == 10)) { 617 if ((value < 2) || (value > 0xFF)) { 618 rs->ti->error = "Bad value for 'raid10_copies'"; 619 return -EINVAL; 620 } 621 rs->print_flags |= DMPF_RAID10_COPIES; 622 raid10_copies = value; 623 } else { 624 DMERR("Unable to parse RAID parameter: %s", key); 625 rs->ti->error = "Unable to parse RAID parameters"; 626 return -EINVAL; 627 } 628 } 629 630 if (validate_region_size(rs, region_size)) 631 return -EINVAL; 632 633 if (rs->md.chunk_sectors) 634 max_io_len = rs->md.chunk_sectors; 635 else 636 max_io_len = region_size; 637 638 if (dm_set_target_max_io_len(rs->ti, max_io_len)) 639 return -EINVAL; 640 641 if (rs->raid_type->level == 10) { 642 if (raid10_copies > rs->md.raid_disks) { 643 rs->ti->error = "Not enough devices to satisfy specification"; 644 return -EINVAL; 645 } 646 647 /* (Len * #mirrors) / #devices */ 648 sectors_per_dev = rs->ti->len * raid10_copies; 649 sector_div(sectors_per_dev, rs->md.raid_disks); 650 651 rs->md.layout = raid10_format_to_md_layout(raid10_format, 652 raid10_copies); 653 rs->md.new_layout = rs->md.layout; 654 } else if ((rs->raid_type->level > 1) && 655 sector_div(sectors_per_dev, 656 (rs->md.raid_disks - rs->raid_type->parity_devs))) { 657 rs->ti->error = "Target length not divisible by number of data devices"; 658 return -EINVAL; 659 } 660 rs->md.dev_sectors = sectors_per_dev; 661 662 /* Assume there are no metadata devices until the drives are parsed */ 663 rs->md.persistent = 0; 664 rs->md.external = 1; 665 666 return 0; 667 } 668 669 static void do_table_event(struct work_struct *ws) 670 { 671 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); 672 673 dm_table_event(rs->ti->table); 674 } 675 676 static int raid_is_congested(struct dm_target_callbacks *cb, int bits) 677 { 678 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); 679 680 if (rs->raid_type->level == 1) 681 return md_raid1_congested(&rs->md, bits); 682 683 if (rs->raid_type->level == 10) 684 return md_raid10_congested(&rs->md, bits); 685 686 return md_raid5_congested(&rs->md, bits); 687 } 688 689 /* 690 * This structure is never routinely used by userspace, unlike md superblocks. 691 * Devices with this superblock should only ever be accessed via device-mapper. 692 */ 693 #define DM_RAID_MAGIC 0x64526D44 694 struct dm_raid_superblock { 695 __le32 magic; /* "DmRd" */ 696 __le32 features; /* Used to indicate possible future changes */ 697 698 __le32 num_devices; /* Number of devices in this array. (Max 64) */ 699 __le32 array_position; /* The position of this drive in the array */ 700 701 __le64 events; /* Incremented by md when superblock updated */ 702 __le64 failed_devices; /* Bit field of devices to indicate failures */ 703 704 /* 705 * This offset tracks the progress of the repair or replacement of 706 * an individual drive. 707 */ 708 __le64 disk_recovery_offset; 709 710 /* 711 * This offset tracks the progress of the initial array 712 * synchronisation/parity calculation. 713 */ 714 __le64 array_resync_offset; 715 716 /* 717 * RAID characteristics 718 */ 719 __le32 level; 720 __le32 layout; 721 __le32 stripe_sectors; 722 723 __u8 pad[452]; /* Round struct to 512 bytes. */ 724 /* Always set to 0 when writing. */ 725 } __packed; 726 727 static int read_disk_sb(struct md_rdev *rdev, int size) 728 { 729 BUG_ON(!rdev->sb_page); 730 731 if (rdev->sb_loaded) 732 return 0; 733 734 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) { 735 DMERR("Failed to read superblock of device at position %d", 736 rdev->raid_disk); 737 md_error(rdev->mddev, rdev); 738 return -EINVAL; 739 } 740 741 rdev->sb_loaded = 1; 742 743 return 0; 744 } 745 746 static void super_sync(struct mddev *mddev, struct md_rdev *rdev) 747 { 748 int i; 749 uint64_t failed_devices; 750 struct dm_raid_superblock *sb; 751 struct raid_set *rs = container_of(mddev, struct raid_set, md); 752 753 sb = page_address(rdev->sb_page); 754 failed_devices = le64_to_cpu(sb->failed_devices); 755 756 for (i = 0; i < mddev->raid_disks; i++) 757 if (!rs->dev[i].data_dev || 758 test_bit(Faulty, &(rs->dev[i].rdev.flags))) 759 failed_devices |= (1ULL << i); 760 761 memset(sb, 0, sizeof(*sb)); 762 763 sb->magic = cpu_to_le32(DM_RAID_MAGIC); 764 sb->features = cpu_to_le32(0); /* No features yet */ 765 766 sb->num_devices = cpu_to_le32(mddev->raid_disks); 767 sb->array_position = cpu_to_le32(rdev->raid_disk); 768 769 sb->events = cpu_to_le64(mddev->events); 770 sb->failed_devices = cpu_to_le64(failed_devices); 771 772 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); 773 sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); 774 775 sb->level = cpu_to_le32(mddev->level); 776 sb->layout = cpu_to_le32(mddev->layout); 777 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); 778 } 779 780 /* 781 * super_load 782 * 783 * This function creates a superblock if one is not found on the device 784 * and will decide which superblock to use if there's a choice. 785 * 786 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise 787 */ 788 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) 789 { 790 int ret; 791 struct dm_raid_superblock *sb; 792 struct dm_raid_superblock *refsb; 793 uint64_t events_sb, events_refsb; 794 795 rdev->sb_start = 0; 796 rdev->sb_size = sizeof(*sb); 797 798 ret = read_disk_sb(rdev, rdev->sb_size); 799 if (ret) 800 return ret; 801 802 sb = page_address(rdev->sb_page); 803 804 /* 805 * Two cases that we want to write new superblocks and rebuild: 806 * 1) New device (no matching magic number) 807 * 2) Device specified for rebuild (!In_sync w/ offset == 0) 808 */ 809 if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) || 810 (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) { 811 super_sync(rdev->mddev, rdev); 812 813 set_bit(FirstUse, &rdev->flags); 814 815 /* Force writing of superblocks to disk */ 816 set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); 817 818 /* Any superblock is better than none, choose that if given */ 819 return refdev ? 0 : 1; 820 } 821 822 if (!refdev) 823 return 1; 824 825 events_sb = le64_to_cpu(sb->events); 826 827 refsb = page_address(refdev->sb_page); 828 events_refsb = le64_to_cpu(refsb->events); 829 830 return (events_sb > events_refsb) ? 1 : 0; 831 } 832 833 static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev) 834 { 835 int role; 836 struct raid_set *rs = container_of(mddev, struct raid_set, md); 837 uint64_t events_sb; 838 uint64_t failed_devices; 839 struct dm_raid_superblock *sb; 840 uint32_t new_devs = 0; 841 uint32_t rebuilds = 0; 842 struct md_rdev *r; 843 struct dm_raid_superblock *sb2; 844 845 sb = page_address(rdev->sb_page); 846 events_sb = le64_to_cpu(sb->events); 847 failed_devices = le64_to_cpu(sb->failed_devices); 848 849 /* 850 * Initialise to 1 if this is a new superblock. 851 */ 852 mddev->events = events_sb ? : 1; 853 854 /* 855 * Reshaping is not currently allowed 856 */ 857 if ((le32_to_cpu(sb->level) != mddev->level) || 858 (le32_to_cpu(sb->layout) != mddev->layout) || 859 (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) { 860 DMERR("Reshaping arrays not yet supported."); 861 return -EINVAL; 862 } 863 864 /* We can only change the number of devices in RAID1 right now */ 865 if ((rs->raid_type->level != 1) && 866 (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { 867 DMERR("Reshaping arrays not yet supported."); 868 return -EINVAL; 869 } 870 871 if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))) 872 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); 873 874 /* 875 * During load, we set FirstUse if a new superblock was written. 876 * There are two reasons we might not have a superblock: 877 * 1) The array is brand new - in which case, all of the 878 * devices must have their In_sync bit set. Also, 879 * recovery_cp must be 0, unless forced. 880 * 2) This is a new device being added to an old array 881 * and the new device needs to be rebuilt - in which 882 * case the In_sync bit will /not/ be set and 883 * recovery_cp must be MaxSector. 884 */ 885 rdev_for_each(r, mddev) { 886 if (!test_bit(In_sync, &r->flags)) { 887 DMINFO("Device %d specified for rebuild: " 888 "Clearing superblock", r->raid_disk); 889 rebuilds++; 890 } else if (test_bit(FirstUse, &r->flags)) 891 new_devs++; 892 } 893 894 if (!rebuilds) { 895 if (new_devs == mddev->raid_disks) { 896 DMINFO("Superblocks created for new array"); 897 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); 898 } else if (new_devs) { 899 DMERR("New device injected " 900 "into existing array without 'rebuild' " 901 "parameter specified"); 902 return -EINVAL; 903 } 904 } else if (new_devs) { 905 DMERR("'rebuild' devices cannot be " 906 "injected into an array with other first-time devices"); 907 return -EINVAL; 908 } else if (mddev->recovery_cp != MaxSector) { 909 DMERR("'rebuild' specified while array is not in-sync"); 910 return -EINVAL; 911 } 912 913 /* 914 * Now we set the Faulty bit for those devices that are 915 * recorded in the superblock as failed. 916 */ 917 rdev_for_each(r, mddev) { 918 if (!r->sb_page) 919 continue; 920 sb2 = page_address(r->sb_page); 921 sb2->failed_devices = 0; 922 923 /* 924 * Check for any device re-ordering. 925 */ 926 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) { 927 role = le32_to_cpu(sb2->array_position); 928 if (role != r->raid_disk) { 929 if (rs->raid_type->level != 1) { 930 rs->ti->error = "Cannot change device " 931 "positions in RAID array"; 932 return -EINVAL; 933 } 934 DMINFO("RAID1 device #%d now at position #%d", 935 role, r->raid_disk); 936 } 937 938 /* 939 * Partial recovery is performed on 940 * returning failed devices. 941 */ 942 if (failed_devices & (1 << role)) 943 set_bit(Faulty, &r->flags); 944 } 945 } 946 947 return 0; 948 } 949 950 static int super_validate(struct mddev *mddev, struct md_rdev *rdev) 951 { 952 struct dm_raid_superblock *sb = page_address(rdev->sb_page); 953 954 /* 955 * If mddev->events is not set, we know we have not yet initialized 956 * the array. 957 */ 958 if (!mddev->events && super_init_validation(mddev, rdev)) 959 return -EINVAL; 960 961 mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */ 962 rdev->mddev->bitmap_info.default_offset = 4096 >> 9; 963 if (!test_bit(FirstUse, &rdev->flags)) { 964 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); 965 if (rdev->recovery_offset != MaxSector) 966 clear_bit(In_sync, &rdev->flags); 967 } 968 969 /* 970 * If a device comes back, set it as not In_sync and no longer faulty. 971 */ 972 if (test_bit(Faulty, &rdev->flags)) { 973 clear_bit(Faulty, &rdev->flags); 974 clear_bit(In_sync, &rdev->flags); 975 rdev->saved_raid_disk = rdev->raid_disk; 976 rdev->recovery_offset = 0; 977 } 978 979 clear_bit(FirstUse, &rdev->flags); 980 981 return 0; 982 } 983 984 /* 985 * Analyse superblocks and select the freshest. 986 */ 987 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) 988 { 989 int ret; 990 struct raid_dev *dev; 991 struct md_rdev *rdev, *tmp, *freshest; 992 struct mddev *mddev = &rs->md; 993 994 freshest = NULL; 995 rdev_for_each_safe(rdev, tmp, mddev) { 996 /* 997 * Skipping super_load due to DMPF_SYNC will cause 998 * the array to undergo initialization again as 999 * though it were new. This is the intended effect 1000 * of the "sync" directive. 1001 * 1002 * When reshaping capability is added, we must ensure 1003 * that the "sync" directive is disallowed during the 1004 * reshape. 1005 */ 1006 if (rs->print_flags & DMPF_SYNC) 1007 continue; 1008 1009 if (!rdev->meta_bdev) 1010 continue; 1011 1012 ret = super_load(rdev, freshest); 1013 1014 switch (ret) { 1015 case 1: 1016 freshest = rdev; 1017 break; 1018 case 0: 1019 break; 1020 default: 1021 dev = container_of(rdev, struct raid_dev, rdev); 1022 if (dev->meta_dev) 1023 dm_put_device(ti, dev->meta_dev); 1024 1025 dev->meta_dev = NULL; 1026 rdev->meta_bdev = NULL; 1027 1028 if (rdev->sb_page) 1029 put_page(rdev->sb_page); 1030 1031 rdev->sb_page = NULL; 1032 1033 rdev->sb_loaded = 0; 1034 1035 /* 1036 * We might be able to salvage the data device 1037 * even though the meta device has failed. For 1038 * now, we behave as though '- -' had been 1039 * set for this device in the table. 1040 */ 1041 if (dev->data_dev) 1042 dm_put_device(ti, dev->data_dev); 1043 1044 dev->data_dev = NULL; 1045 rdev->bdev = NULL; 1046 1047 list_del(&rdev->same_set); 1048 } 1049 } 1050 1051 if (!freshest) 1052 return 0; 1053 1054 if (validate_raid_redundancy(rs)) { 1055 rs->ti->error = "Insufficient redundancy to activate array"; 1056 return -EINVAL; 1057 } 1058 1059 /* 1060 * Validation of the freshest device provides the source of 1061 * validation for the remaining devices. 1062 */ 1063 ti->error = "Unable to assemble array: Invalid superblocks"; 1064 if (super_validate(mddev, freshest)) 1065 return -EINVAL; 1066 1067 rdev_for_each(rdev, mddev) 1068 if ((rdev != freshest) && super_validate(mddev, rdev)) 1069 return -EINVAL; 1070 1071 return 0; 1072 } 1073 1074 /* 1075 * Construct a RAID4/5/6 mapping: 1076 * Args: 1077 * <raid_type> <#raid_params> <raid_params> \ 1078 * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> } 1079 * 1080 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for 1081 * details on possible <raid_params>. 1082 */ 1083 static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) 1084 { 1085 int ret; 1086 struct raid_type *rt; 1087 unsigned long num_raid_params, num_raid_devs; 1088 struct raid_set *rs = NULL; 1089 1090 /* Must have at least <raid_type> <#raid_params> */ 1091 if (argc < 2) { 1092 ti->error = "Too few arguments"; 1093 return -EINVAL; 1094 } 1095 1096 /* raid type */ 1097 rt = get_raid_type(argv[0]); 1098 if (!rt) { 1099 ti->error = "Unrecognised raid_type"; 1100 return -EINVAL; 1101 } 1102 argc--; 1103 argv++; 1104 1105 /* number of RAID parameters */ 1106 if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) { 1107 ti->error = "Cannot understand number of RAID parameters"; 1108 return -EINVAL; 1109 } 1110 argc--; 1111 argv++; 1112 1113 /* Skip over RAID params for now and find out # of devices */ 1114 if (num_raid_params + 1 > argc) { 1115 ti->error = "Arguments do not agree with counts given"; 1116 return -EINVAL; 1117 } 1118 1119 if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) || 1120 (num_raid_devs >= INT_MAX)) { 1121 ti->error = "Cannot understand number of raid devices"; 1122 return -EINVAL; 1123 } 1124 1125 rs = context_alloc(ti, rt, (unsigned)num_raid_devs); 1126 if (IS_ERR(rs)) 1127 return PTR_ERR(rs); 1128 1129 ret = parse_raid_params(rs, argv, (unsigned)num_raid_params); 1130 if (ret) 1131 goto bad; 1132 1133 ret = -EINVAL; 1134 1135 argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */ 1136 argv += num_raid_params + 1; 1137 1138 if (argc != (num_raid_devs * 2)) { 1139 ti->error = "Supplied RAID devices does not match the count given"; 1140 goto bad; 1141 } 1142 1143 ret = dev_parms(rs, argv); 1144 if (ret) 1145 goto bad; 1146 1147 rs->md.sync_super = super_sync; 1148 ret = analyse_superblocks(ti, rs); 1149 if (ret) 1150 goto bad; 1151 1152 INIT_WORK(&rs->md.event_work, do_table_event); 1153 ti->private = rs; 1154 ti->num_flush_requests = 1; 1155 1156 mutex_lock(&rs->md.reconfig_mutex); 1157 ret = md_run(&rs->md); 1158 rs->md.in_sync = 0; /* Assume already marked dirty */ 1159 mutex_unlock(&rs->md.reconfig_mutex); 1160 1161 if (ret) { 1162 ti->error = "Fail to run raid array"; 1163 goto bad; 1164 } 1165 1166 if (ti->len != rs->md.array_sectors) { 1167 ti->error = "Array size does not match requested target length"; 1168 ret = -EINVAL; 1169 goto size_mismatch; 1170 } 1171 rs->callbacks.congested_fn = raid_is_congested; 1172 dm_table_add_target_callbacks(ti->table, &rs->callbacks); 1173 1174 mddev_suspend(&rs->md); 1175 return 0; 1176 1177 size_mismatch: 1178 md_stop(&rs->md); 1179 bad: 1180 context_free(rs); 1181 1182 return ret; 1183 } 1184 1185 static void raid_dtr(struct dm_target *ti) 1186 { 1187 struct raid_set *rs = ti->private; 1188 1189 list_del_init(&rs->callbacks.list); 1190 md_stop(&rs->md); 1191 context_free(rs); 1192 } 1193 1194 static int raid_map(struct dm_target *ti, struct bio *bio) 1195 { 1196 struct raid_set *rs = ti->private; 1197 struct mddev *mddev = &rs->md; 1198 1199 mddev->pers->make_request(mddev, bio); 1200 1201 return DM_MAPIO_SUBMITTED; 1202 } 1203 1204 static int raid_status(struct dm_target *ti, status_type_t type, 1205 unsigned status_flags, char *result, unsigned maxlen) 1206 { 1207 struct raid_set *rs = ti->private; 1208 unsigned raid_param_cnt = 1; /* at least 1 for chunksize */ 1209 unsigned sz = 0; 1210 int i, array_in_sync = 0; 1211 sector_t sync; 1212 1213 switch (type) { 1214 case STATUSTYPE_INFO: 1215 DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks); 1216 1217 if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery)) 1218 sync = rs->md.curr_resync_completed; 1219 else 1220 sync = rs->md.recovery_cp; 1221 1222 if (sync >= rs->md.resync_max_sectors) { 1223 array_in_sync = 1; 1224 sync = rs->md.resync_max_sectors; 1225 } else { 1226 /* 1227 * The array may be doing an initial sync, or it may 1228 * be rebuilding individual components. If all the 1229 * devices are In_sync, then it is the array that is 1230 * being initialized. 1231 */ 1232 for (i = 0; i < rs->md.raid_disks; i++) 1233 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) 1234 array_in_sync = 1; 1235 } 1236 /* 1237 * Status characters: 1238 * 'D' = Dead/Failed device 1239 * 'a' = Alive but not in-sync 1240 * 'A' = Alive and in-sync 1241 */ 1242 for (i = 0; i < rs->md.raid_disks; i++) { 1243 if (test_bit(Faulty, &rs->dev[i].rdev.flags)) 1244 DMEMIT("D"); 1245 else if (!array_in_sync || 1246 !test_bit(In_sync, &rs->dev[i].rdev.flags)) 1247 DMEMIT("a"); 1248 else 1249 DMEMIT("A"); 1250 } 1251 1252 /* 1253 * In-sync ratio: 1254 * The in-sync ratio shows the progress of: 1255 * - Initializing the array 1256 * - Rebuilding a subset of devices of the array 1257 * The user can distinguish between the two by referring 1258 * to the status characters. 1259 */ 1260 DMEMIT(" %llu/%llu", 1261 (unsigned long long) sync, 1262 (unsigned long long) rs->md.resync_max_sectors); 1263 1264 break; 1265 case STATUSTYPE_TABLE: 1266 /* The string you would use to construct this array */ 1267 for (i = 0; i < rs->md.raid_disks; i++) { 1268 if ((rs->print_flags & DMPF_REBUILD) && 1269 rs->dev[i].data_dev && 1270 !test_bit(In_sync, &rs->dev[i].rdev.flags)) 1271 raid_param_cnt += 2; /* for rebuilds */ 1272 if (rs->dev[i].data_dev && 1273 test_bit(WriteMostly, &rs->dev[i].rdev.flags)) 1274 raid_param_cnt += 2; 1275 } 1276 1277 raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2); 1278 if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) 1279 raid_param_cnt--; 1280 1281 DMEMIT("%s %u %u", rs->raid_type->name, 1282 raid_param_cnt, rs->md.chunk_sectors); 1283 1284 if ((rs->print_flags & DMPF_SYNC) && 1285 (rs->md.recovery_cp == MaxSector)) 1286 DMEMIT(" sync"); 1287 if (rs->print_flags & DMPF_NOSYNC) 1288 DMEMIT(" nosync"); 1289 1290 for (i = 0; i < rs->md.raid_disks; i++) 1291 if ((rs->print_flags & DMPF_REBUILD) && 1292 rs->dev[i].data_dev && 1293 !test_bit(In_sync, &rs->dev[i].rdev.flags)) 1294 DMEMIT(" rebuild %u", i); 1295 1296 if (rs->print_flags & DMPF_DAEMON_SLEEP) 1297 DMEMIT(" daemon_sleep %lu", 1298 rs->md.bitmap_info.daemon_sleep); 1299 1300 if (rs->print_flags & DMPF_MIN_RECOVERY_RATE) 1301 DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min); 1302 1303 if (rs->print_flags & DMPF_MAX_RECOVERY_RATE) 1304 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); 1305 1306 for (i = 0; i < rs->md.raid_disks; i++) 1307 if (rs->dev[i].data_dev && 1308 test_bit(WriteMostly, &rs->dev[i].rdev.flags)) 1309 DMEMIT(" write_mostly %u", i); 1310 1311 if (rs->print_flags & DMPF_MAX_WRITE_BEHIND) 1312 DMEMIT(" max_write_behind %lu", 1313 rs->md.bitmap_info.max_write_behind); 1314 1315 if (rs->print_flags & DMPF_STRIPE_CACHE) { 1316 struct r5conf *conf = rs->md.private; 1317 1318 /* convert from kiB to sectors */ 1319 DMEMIT(" stripe_cache %d", 1320 conf ? conf->max_nr_stripes * 2 : 0); 1321 } 1322 1323 if (rs->print_flags & DMPF_REGION_SIZE) 1324 DMEMIT(" region_size %lu", 1325 rs->md.bitmap_info.chunksize >> 9); 1326 1327 if (rs->print_flags & DMPF_RAID10_COPIES) 1328 DMEMIT(" raid10_copies %u", 1329 raid10_md_layout_to_copies(rs->md.layout)); 1330 1331 if (rs->print_flags & DMPF_RAID10_FORMAT) 1332 DMEMIT(" raid10_format near"); 1333 1334 DMEMIT(" %d", rs->md.raid_disks); 1335 for (i = 0; i < rs->md.raid_disks; i++) { 1336 if (rs->dev[i].meta_dev) 1337 DMEMIT(" %s", rs->dev[i].meta_dev->name); 1338 else 1339 DMEMIT(" -"); 1340 1341 if (rs->dev[i].data_dev) 1342 DMEMIT(" %s", rs->dev[i].data_dev->name); 1343 else 1344 DMEMIT(" -"); 1345 } 1346 } 1347 1348 return 0; 1349 } 1350 1351 static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) 1352 { 1353 struct raid_set *rs = ti->private; 1354 unsigned i; 1355 int ret = 0; 1356 1357 for (i = 0; !ret && i < rs->md.raid_disks; i++) 1358 if (rs->dev[i].data_dev) 1359 ret = fn(ti, 1360 rs->dev[i].data_dev, 1361 0, /* No offset on data devs */ 1362 rs->md.dev_sectors, 1363 data); 1364 1365 return ret; 1366 } 1367 1368 static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) 1369 { 1370 struct raid_set *rs = ti->private; 1371 unsigned chunk_size = rs->md.chunk_sectors << 9; 1372 struct r5conf *conf = rs->md.private; 1373 1374 blk_limits_io_min(limits, chunk_size); 1375 blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded)); 1376 } 1377 1378 static void raid_presuspend(struct dm_target *ti) 1379 { 1380 struct raid_set *rs = ti->private; 1381 1382 md_stop_writes(&rs->md); 1383 } 1384 1385 static void raid_postsuspend(struct dm_target *ti) 1386 { 1387 struct raid_set *rs = ti->private; 1388 1389 mddev_suspend(&rs->md); 1390 } 1391 1392 static void raid_resume(struct dm_target *ti) 1393 { 1394 struct raid_set *rs = ti->private; 1395 1396 set_bit(MD_CHANGE_DEVS, &rs->md.flags); 1397 if (!rs->bitmap_loaded) { 1398 bitmap_load(&rs->md); 1399 rs->bitmap_loaded = 1; 1400 } 1401 1402 clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); 1403 mddev_resume(&rs->md); 1404 } 1405 1406 static struct target_type raid_target = { 1407 .name = "raid", 1408 .version = {1, 4, 1}, 1409 .module = THIS_MODULE, 1410 .ctr = raid_ctr, 1411 .dtr = raid_dtr, 1412 .map = raid_map, 1413 .status = raid_status, 1414 .iterate_devices = raid_iterate_devices, 1415 .io_hints = raid_io_hints, 1416 .presuspend = raid_presuspend, 1417 .postsuspend = raid_postsuspend, 1418 .resume = raid_resume, 1419 }; 1420 1421 static int __init dm_raid_init(void) 1422 { 1423 return dm_register_target(&raid_target); 1424 } 1425 1426 static void __exit dm_raid_exit(void) 1427 { 1428 dm_unregister_target(&raid_target); 1429 } 1430 1431 module_init(dm_raid_init); 1432 module_exit(dm_raid_exit); 1433 1434 MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target"); 1435 MODULE_ALIAS("dm-raid1"); 1436 MODULE_ALIAS("dm-raid10"); 1437 MODULE_ALIAS("dm-raid4"); 1438 MODULE_ALIAS("dm-raid5"); 1439 MODULE_ALIAS("dm-raid6"); 1440 MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>"); 1441 MODULE_LICENSE("GPL"); 1442