/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/raid/md_u.h>
#include <trace/events/block.h>

#define MaxSector (~(sector_t)0)
/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256

enum md_submodule_type {
	MD_PERSONALITY = 0,
	MD_CLUSTER,
	MD_BITMAP,
};

enum md_submodule_id {
	ID_LINEAR	= LEVEL_LINEAR,
	ID_RAID0	= 0,
	ID_RAID1	= 1,
	ID_RAID4	= 4,
	ID_RAID5	= 5,
	ID_RAID6	= 6,
	ID_RAID10	= 10,
	ID_CLUSTER,
	ID_BITMAP,
	ID_LLBITMAP,
	ID_BITMAP_NONE,
};

struct md_submodule_head {
	enum md_submodule_type	type;
	enum md_submodule_id	id;
	const char		*name;
	struct module		*owner;
};

/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included because
 * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
 * seems to suggest that the errors it avoids retrying should usually
 * be retried.
 */
#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)

/* Status of sync thread. */
enum sync_action {
	/*
	 * Represented by MD_RECOVERY_SYNC, started when:
	 * 1) after assemble, sync data from the first rdev to other copies;
	 * this must be done before other sync actions and will only execute
	 * once;
	 * 2) the array is resized (note that this is not reshape), sync data
	 * for the new range;
	 */
	ACTION_RESYNC,
	/*
	 * Represented by MD_RECOVERY_RECOVER, started when:
	 * 1) for a new replacement, sync data based on the rdev being
	 * replaced or on available copies from other rdevs;
	 * 2) for a new member disk while the array is degraded, sync data
	 * from other rdevs;
	 * 3) reassemble after power failure or re-add a hot-removed rdev,
	 * sync data from the first rdev to other copies based on the bitmap;
	 */
	ACTION_RECOVER,
	/*
	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED |
	 * MD_RECOVERY_CHECK, started when the user echoes "check" to the
	 * sysfs file sync_action; used to check whether data copies from
	 * different rdevs are the same. The number of mismatched sectors is
	 * exported to the user through the sysfs file mismatch_cnt;
	 */
	ACTION_CHECK,
	/*
	 * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED, started
	 * when the user echoes "repair" to the sysfs file sync_action;
	 * usually paired with ACTION_CHECK, used to force syncing data once
	 * the user has found inconsistent data;
	 */
	ACTION_REPAIR,
	/*
	 * Represented by MD_RECOVERY_RESHAPE, started when a new member disk
	 * is added to the conf; note that this is different from spares or
	 * replacement;
	 */
	ACTION_RESHAPE,
	/*
	 * Represented by MD_RECOVERY_FROZEN, can be set through the sysfs
	 * file sync_action or by internal usage like setting the array
	 * read-only; forbids the above actions.
	 */
	ACTION_FROZEN,
	/*
	 * None of the above actions match.
	 */
	ACTION_IDLE,
	NR_SYNC_ACTIONS,
};
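
/*
 * Example (user space, illustrative sketch): a "check" pass can be requested
 * and its result read back through the md sysfs files named above:
 *
 *	# echo check > /sys/block/md0/md/sync_action
 *	# cat /sys/block/md0/md/mismatch_cnt
 */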

/*
 * The struct embedded in rdev is used to serialize IO.
 */
struct serial_in_rdev {
	struct rb_root_cached serial_rb;
	spinlock_t serial_lock;
};

/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	unsigned long last_events;	/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */
	struct file *bdev_file;		/* Handle from open for bdev */

	struct page *sb_page, *bb_page;
	int sb_loaded;
	__u64 sb_events;
	sector_t data_offset;		/* start of data in array */
	sector_t new_data_offset;	/* only relevant while reshaping */
	sector_t sb_start;		/* offset of the super block (in 512-byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	struct kobject kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long flags;		/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change
					 * completes.
					 */
	int saved_raid_disk;		/* role that device used to have in
					 * the array and could again if we did
					 * a partial resync from the bitmap
					 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors
					 * that we have tried to ignore.
					 */
	time64_t last_read_error;	/* monotonic time since our
					 * last read error
					 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and
					 * storing in superblock.
					 */

	struct serial_in_rdev *serial;	/* used for raid1 io serialization */

	struct kernfs_node *sysfs_state;	/* handle for 'state'
						 * sysfs entry */
	/* handle for 'unacknowledged_bad_blocks' sysfs dentry */
	struct kernfs_node *sysfs_unack_badblocks;
	/* handle for 'bad_blocks' sysfs dentry */
	struct kernfs_node *sysfs_badblocks;
	struct badblocks badblocks;

	struct {
		short offset;		/* Offset from superblock to start of
					 * PPL. Not used by external metadata. */
		unsigned int size;	/* Size in sectors of the PPL space */
		sector_t sector;	/* First sector of the PPL space */
	} ppl;
};
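
/*
 * Example (illustrative sketch): classifying a device per the two-flag state
 * table above, using the Faulty and In_sync bits from enum flag_bits below:
 *
 *	if (test_bit(Faulty, &rdev->flags))
 *		... not working ...
 *	else if (test_bit(In_sync, &rdev->flags))
 *		... fully working ...
 *	else
 *		... working, but not in sync with the array ...
 */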

enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
				 * bitmap-based recovery to get fully in sync.
				 * The bit is only meaningful before device
				 * has been passed to pers->hot_add_disk.
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked.  The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check.  So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with the
				 * same raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
	ClusterRemove,
	ExternalBbl,		/* External metadata provides bad
				 * block management for a disk
				 */
	FailFast,		/* Minimal retries should be attempted on
				 * this device, so use REQ_FAILFAST_DEV.
				 * Also don't try to repair failed reads.
				 * It is expected that no bad-block log
				 * is present.
				 */
	LastDev,		/* Seems to be the last working dev as
				 * it didn't fail, so don't use FailFast
				 * any more for metadata
				 */
	CollisionCheck,		/*
				 * check if there is collision between raid1
				 * serial bios.
				 */
	Nonrot,			/* non-rotational device (SSD) */
};

static inline int is_badblock(struct md_rdev *rdev, sector_t s, sector_t sectors,
			      sector_t *first_bad, sector_t *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
					 sectors,
					 first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}

static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
				    int sectors)
{
	sector_t first_bad;
	sector_t bad_sectors;

	return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
}

extern bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			       int is_new);
extern void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				 int is_new);
struct md_cluster_info;
struct md_cluster_operations;
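
/*
 * Example (illustrative sketch): a read path may consult the bad-block list
 * before issuing IO, and shorten the request or pick another copy:
 *
 *	sector_t first_bad, bad_sectors;
 *
 *	if (is_badblock(rdev, sector, nr_sectors, &first_bad, &bad_sectors))
 *		... read only up to first_bad, or try a different rdev ...
 */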

/**
 * enum mddev_flags - md device flags.
 * @MD_ARRAY_FIRST_USE: First use of array, needs initialization.
 * @MD_CLOSING: If set, we are closing the array; do not open it then.
 * @MD_JOURNAL_CLEAN: A raid with journal is already clean.
 * @MD_HAS_JOURNAL: The raid array has journal feature set.
 * @MD_CLUSTER_RESYNC_LOCKED: cluster raid only, which means the node already
 *			      took the resync lock and needs to release it.
 * @MD_FAILFAST_SUPPORTED: Using MD_FAILFAST on metadata writes is supported as
 *			   calls to md_error() will never cause the array to
 *			   become failed.
 * @MD_HAS_PPL: The raid array has PPL feature set.
 * @MD_HAS_MULTIPLE_PPLS: The raid array has multiple PPLs feature set.
 * @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report that
 *		  the array is ready yet.
 * @MD_BROKEN: This is used to stop writes and mark array as failed.
 * @MD_DELETED: This device is being deleted.
 * @MD_HAS_SUPERBLOCK: There is a persistent superblock on the member disks.
 * @MD_FAILLAST_DEV: Allow the last rdev to be removed.
 * @MD_SERIALIZE_POLICY: Enforce that write IO is not reordered; only used by
 *			 raid1.
 *
 * change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added
 */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,
	MD_CLOSING,
	MD_JOURNAL_CLEAN,
	MD_HAS_JOURNAL,
	MD_CLUSTER_RESYNC_LOCKED,
	MD_FAILFAST_SUPPORTED,
	MD_HAS_PPL,
	MD_HAS_MULTIPLE_PPLS,
	MD_NOT_READY,
	MD_BROKEN,
	MD_DELETED,
	MD_HAS_SUPERBLOCK,
	MD_FAILLAST_DEV,
	MD_SERIALIZE_POLICY,
};

enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,	/* Some device status has changed */
	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
};
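
/*
 * Example (illustrative sketch): the common pattern for requesting a metadata
 * update is to set a change flag and kick the management thread:
 *
 *	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 *	md_wakeup_thread(mddev->thread);
 */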

#define NR_SERIAL_INFOS		8
/* record the current range of serialized IOs */
struct serial_info {
	struct rb_node node;
	sector_t start;		/* start sector of rb node */
	sector_t last;		/* end sector of rb node */
	sector_t wnode_start;	/* address of waiting nodes on the same list */
	sector_t _subtree_last;	/* highest sector in subtree of rb node */
	struct list_head list_node;
	struct list_head waiters;
	struct completion ready;
};

/*
 * mddev->curr_resync stores the current sector of the resync but
 * also has some overloaded values.
 */
enum {
	/* No resync in progress */
	MD_RESYNC_NONE = 0,
	/* Yielded to allow another conflicting resync to commence */
	MD_RESYNC_YIELDED = 1,
	/* Delayed to check that there is no conflict with another sync */
	MD_RESYNC_DELAYED = 2,
	/* Any value greater than or equal to this is in an active resync */
	MD_RESYNC_ACTIVE = 3,
};
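
/*
 * Example (illustrative sketch): because of the overloaded values above, a
 * test for "resync actually running" compares against MD_RESYNC_ACTIVE
 * rather than against zero:
 *
 *	if (mddev->curr_resync >= MD_RESYNC_ACTIVE)
 *		... an actual resync sector is recorded ...
 */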

struct mddev {
	void				*private;
	struct md_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
	unsigned long			sb_flags;

	int				suspended;
	struct mutex			suspend_mutex;
	struct percpu_ref		active_io;
	int				ro;
	int				sysfs_active;	/* set when sysfs deletes
							 * are happening, so run/
							 * takeover/stop are not safe
							 */
	struct gendisk			*gendisk;	/* mdraid gendisk */
	struct gendisk			*dm_gendisk;	/* dm-raid gendisk */

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	int				chunk_sectors;
	time64_t			ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	unsigned int			logical_block_size;
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;

	struct md_thread __rcu		*thread;	/* management thread */
	struct md_thread __rcu		*sync_thread;	/* doing resync or reconstruct */

	/*
	 * Set when a sync operation is started. It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished). It is overwritten when a new sync operation is begun.
	 */
	enum sync_action		last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed. So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;
	int				sync_io_depth;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;

	unsigned long			recovery;

	int				in_sync;	/* known to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with disk->open_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> disk->open_mutex
	 *  disk->open_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */

	unsigned long			normal_io_events; /* IO event timestamp */
	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			resync_offset;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;	/* handle for 'sync_action' */
	struct kernfs_node		*sysfs_completed; /* handle for 'sync_completed' */
	struct kernfs_node		*sysfs_degraded; /* handle for 'degraded' */
	struct kernfs_node		*sysfs_level;	/* handle for 'level' */

	/* used for delayed sysfs removal */
	struct work_struct		del_work;
	/* used for registering a new sync thread */
	struct work_struct		sync_work;

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	struct percpu_ref		writes_pending;
	int				sync_checkers;	/* # of threads checking writes_pending */

	enum md_submodule_id		bitmap_id;
	void				*bitmap;	/* the bitmap for the device */
	struct bitmap_operations	*bitmap_ops;
	struct {
		struct file	*file;		/* the bitmap file */
		loff_t		offset;		/* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'.
						 * For external metadata, offset
						 * is from the start of the device.
						 */
		unsigned long	space;		/* space available at this offset */
		loff_t		default_offset;	/* this is the offset to use when
						 * hot-adding a bitmap.  It should
						 * eventually be settable by sysfs.
						 */
		unsigned long	default_space;	/* space available at
						 * default offset */
		struct mutex	mutex;
		unsigned long	chunksize;
		unsigned long	daemon_sleep;	/* how many jiffies between updates? */
		unsigned long	max_write_behind; /* write-behind mode */
		int		external;
		int		nodes;		/* Maximum number of nodes in the cluster */
		char		cluster_name[64]; /* Name of the cluster */
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	const struct attribute_group	*to_remove;

	struct bio_set			bio_set;
	struct bio_set			sync_set;	/* for sync operations like
							 * metadata and bitmap writes
							 */
	struct bio_set			io_clone_set;

	struct work_struct		event_work;	/* used by dm to report failure event */
	mempool_t			*serial_info_pool;
	void				(*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
	struct md_cluster_operations	*cluster_ops;
	unsigned int			good_device_nr;	/* good device num within cluster raid */
	unsigned int			noio_flag;	/* for memalloc scope API */

	/*
	 * Temporarily store rdevs that will finally be removed when
	 * reconfig_mutex is unlocked; protected by reconfig_mutex.
	 */
	struct list_head		deleting;

	/* The sequence number for sync thread */
	atomic_t			sync_seq;
};
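
/*
 * Example (illustrative sketch): fields listed under "lock" above are updated
 * under the spinlock, e.g. when moving the array to 'clean':
 *
 *	spin_lock(&mddev->lock);
 *	if (!mddev->in_sync) {
 *		mddev->in_sync = 1;
 *		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 *	}
 *	spin_unlock(&mddev->lock);
 */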

enum recovery_flags {
	/* flags for sync thread running status */

	/*
	 * set when a sync action is set and a new sync thread needs to be
	 * registered, or when spares are simply added to or removed from the
	 * conf.
	 */
	MD_RECOVERY_NEEDED,
	/* sync thread is running, or about to be started */
	MD_RECOVERY_RUNNING,
	/* sync thread needs to be aborted for some reason */
	MD_RECOVERY_INTR,
	/* sync thread is done and is waiting to be unregistered */
	MD_RECOVERY_DONE,
	/* running sync thread must abort immediately, and not restart */
	MD_RECOVERY_FROZEN,
	/* waiting for pers->start() to finish */
	MD_RECOVERY_WAIT,

	/* flags that determine the sync action, see details in enum sync_action */

	/* if just this flag is set, action is resync. */
	MD_RECOVERY_SYNC,
	/*
	 * paired with MD_RECOVERY_SYNC; if MD_RECOVERY_CHECK is not set,
	 * action is repair, meaning the user requested a resync.
	 */
	MD_RECOVERY_REQUESTED,
	/*
	 * paired with MD_RECOVERY_SYNC and MD_RECOVERY_REQUESTED, action is
	 * check.
	 */
	MD_RECOVERY_CHECK,
	/* recovery, or need to try it */
	MD_RECOVERY_RECOVER,
	/* reshape */
	MD_RECOVERY_RESHAPE,
	/* remote node is running resync thread */
	MD_RESYNCING_REMOTE,
	/* raid456 lazy initial recover */
	MD_RECOVERY_LAZY_RECOVER,
};
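
/*
 * Example (illustrative sketch): the usual way to kick off a sync action is
 * to mark recovery as needed and wake the management thread, which then
 * registers the sync thread:
 *
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 */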

enum md_ro_state {
	MD_RDWR,
	MD_RDONLY,
	MD_AUTO_READ,
	MD_MAX_STATE
};

static inline bool md_is_rdwr(struct mddev *mddev)
{
	return (mddev->ro == MD_RDWR);
}

static inline bool reshape_interrupted(struct mddev *mddev)
{
	/* reshape never started */
	if (mddev->reshape_position == MaxSector)
		return false;

	/* interrupted */
	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return true;

	/* running reshape will be interrupted soon. */
	if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return true;

	return false;
}

static inline int __must_check mddev_lock(struct mddev *mddev)
{
	int ret;

	ret = mutex_lock_interruptible(&mddev->reconfig_mutex);

	/* MD_DELETED is set in do_md_stop with reconfig_mutex.
	 * So check it here.
	 */
	if (!ret && test_bit(MD_DELETED, &mddev->flags)) {
		ret = -ENODEV;
		mutex_unlock(&mddev->reconfig_mutex);
	}

	return ret;
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 * There is no need to check MD_DELETED here: the caller that
 * holds the lock cannot be stopped, and no path may call this
 * function after do_md_stop.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	int ret;

	ret = mutex_trylock(&mddev->reconfig_mutex);
	if (ret && test_bit(MD_DELETED, &mddev->flags)) {
		ret = 0;
		mutex_unlock(&mddev->reconfig_mutex);
	}
	return ret;
}
extern void mddev_unlock(struct mddev *mddev);
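
/*
 * Example (illustrative sketch): reconfiguration paths take the lock with
 * mddev_lock(), which fails with -EINTR on a signal or -ENODEV once the
 * array has been deleted:
 *
 *	int err = mddev_lock(mddev);
 *
 *	if (err)
 *		return err;
 *	... reconfigure under reconfig_mutex ...
 *	mddev_unlock(mddev);
 */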

struct md_personality {
	struct md_submodule_head head;

	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * start up work that does NOT require an md_thread. Tasks that
	 * require an md_thread should go into start().
	 */
	int (*run)(struct mddev *mddev);
	/* start up work that requires md threads */
	int (*start)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active)(struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr,
				 sector_t max_sector, int *skipped);
	int (*resize)(struct mddev *mddev, sector_t sectors);
	sector_t (*size)(struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape)(struct mddev *mddev);
	int (*start_reshape)(struct mddev *mddev);
	void (*finish_reshape)(struct mddev *mddev);
	void (*update_reshape_pos)(struct mddev *mddev);
	void (*prepare_suspend)(struct mddev *mddev);
	/* quiesce suspends or resumes internal processing.
	 * 1 - stop new actions and wait for action io to complete
	 * 0 - return to normal behaviour
	 */
	void (*quiesce)(struct mddev *mddev, int quiesce);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover)(struct mddev *mddev);
	/* Changes the consistency policy of an active array. */
	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
	/* convert io ranges from array to bitmap */
	void (*bitmap_sector)(struct mddev *mddev, sector_t *offset,
			      unsigned long *sectors);
};

struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};

static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}

static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char *mdname(struct mddev *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)					\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
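
/*
 * Example (illustrative sketch): iterating the member devices while skipping
 * failed ones:
 *
 *	struct md_rdev *rdev;
 *
 *	rdev_for_each(rdev, mddev)
 *		if (!test_bit(Faulty, &rdev->flags))
 *			... operate on the working device ...
 */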

struct md_thread {
	void			(*run)(struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
	void			*private;
};

struct md_io_clone {
	struct mddev	*mddev;
	struct bio	*orig_bio;
	unsigned long	start_time;
	sector_t	offset;
	unsigned long	sectors;
	enum stat_group	rw;
	struct bio	bio_clone;
};

#define THREAD_WAKEUP  0

#define md_wakeup_thread(thread)	\
	do {				\
		rcu_read_lock();	\
		__md_wakeup_thread(thread); \
		rcu_read_unlock();	\
	} while (0)

static inline void safe_put_page(struct page *p)
{
	if (p)
		put_page(p);
}

int register_md_submodule(struct md_submodule_head *msh);
void unregister_md_submodule(struct md_submodule_head *msh);

extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp);
extern void __md_wakeup_thread(struct md_thread __rcu *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern enum sync_action md_sync_action(struct mddev *mddev);
extern enum sync_action md_sync_action_by_name(const char *page);
extern const char *md_sync_action_name(enum sync_action action);
extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks);
extern void md_sync_error(struct mddev *mddev);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
			   struct bio *bio, sector_t start, sector_t size);
void md_account_bio(struct mddev *mddev, struct bio **bio);
void md_free_cloned_bio(struct bio *bio);

extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
		       sector_t sector, int size, struct page *page,
		       unsigned int offset);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, blk_opf_t opf, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
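
/*
 * Example (illustrative sketch): a personality typically registers its main
 * thread from ->run() and wakes it whenever there is work; the handler name
 * raidX_daemon below is hypothetical:
 *
 *	conf->thread = md_register_thread(raidX_daemon, mddev, "raidX");
 *	if (!conf->thread)
 *		... fail ...
 *	...
 *	md_wakeup_thread(mddev->thread);
 */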

extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern int mddev_init(struct mddev *mddev);
extern void mddev_destroy(struct mddev *mddev);
void md_init_stacking_limits(struct queue_limits *lim);
struct mddev *md_alloc(dev_t dev, char *name);
void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);
extern void md_idle_sync_thread(struct mddev *mddev);
extern void md_frozen_sync_thread(struct mddev *mddev);
extern void md_unfrozen_sync_thread(struct mddev *mddev);

extern void md_update_sb(struct mddev *mddev, int force);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev);
extern void mddev_destroy_serial_pool(struct mddev *mddev,
				      struct md_rdev *rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);

static inline bool is_rdev_broken(struct md_rdev *rdev)
{
	return !disk_live(rdev->bdev->bd_disk);
}

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);

	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}

static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}

/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
						 unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
	    !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
		mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0;
}

static inline int mddev_suspend_and_lock(struct mddev *mddev)
{
	int ret;

	ret = mddev_suspend(mddev, true);
	if (ret)
		return ret;

	ret = mddev_lock(mddev);
	if (ret)
		mddev_resume(mddev);

	return ret;
}

static inline void mddev_suspend_and_lock_nointr(struct mddev *mddev)
{
	mddev_suspend(mddev, false);
	mutex_lock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock_and_resume(struct mddev *mddev)
{
	mddev_unlock(mddev);
	mddev_resume(mddev);
}
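
/*
 * Example (illustrative sketch): callers that must quiesce IO around a
 * reconfiguration pair the combined helpers above:
 *
 *	int err = mddev_suspend_and_lock(mddev);
 *
 *	if (err)
 *		return err;
 *	... reconfigure with IO suspended ...
 *	mddev_unlock_and_resume(mddev);
 */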

struct mdu_array_info_s;
struct mdu_disk_info_s;

extern int mdp_major;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);
#define MDDEV_STACK_INTEGRITY	(1u << 0)
int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
			    unsigned int flags);
int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev);
void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes);

extern const struct block_device_operations md_fops;

/*
 * MD devices can be used underneath by DM, in which case ->gendisk is NULL.
 */
static inline bool mddev_is_dm(struct mddev *mddev)
{
	return !mddev->gendisk;
}

static inline bool raid_is_456(struct mddev *mddev)
{
	return mddev->level == ID_RAID4 || mddev->level == ID_RAID5 ||
	       mddev->level == ID_RAID6;
}

static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
				     sector_t sector)
{
	if (!mddev_is_dm(mddev))
		trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
}

static inline bool rdev_blocked(struct md_rdev *rdev)
{
	/*
	 * Blocked will be set by the error handler and cleared by the daemon
	 * after updating the superblock; meanwhile write IO should be blocked
	 * to prevent reading old data after a power failure.
	 */
	if (test_bit(Blocked, &rdev->flags))
		return true;

	/*
	 * A Faulty device should not be accessed anymore, so there is no need
	 * to wait for bad blocks to be acknowledged.
	 */
	if (test_bit(Faulty, &rdev->flags))
		return false;

	/* rdev is blocked by badblocks. */
	if (test_bit(BlockedBadBlocks, &rdev->flags))
		return true;

	return false;
}

#define mddev_add_trace_msg(mddev, fmt, args...)			\
do {									\
	if (!mddev_is_dm(mddev))					\
		blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \
} while (0)

#endif /* _MD_MD_H */