// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

static struct workqueue_struct *dm_mpath_wq;

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0

static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned int fail_count;	/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned int pg_num;		/* Reference number */
	unsigned int nr_pgpaths;	/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	struct priority_group *last_probed_pg;

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned int nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	wait_queue_head_t probe_wait;	/* Wait for probing paths */
	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;

	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
	bool is_suspending;
};
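/*
 * Locking summary (as used throughout this file): m->lock guards the
 * path-selection state (current_pgpath, current_pg, next_pg, last_probed_pg),
 * queued_bios and most MPATHF_* flag transitions.  Hot paths read
 * current_pgpath/current_pg with READ_ONCE() and take the lock only when
 * state must change; counters such as nr_valid_paths and pg_init_in_progress
 * are atomics.
 */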
/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
	u64 start_time_ns;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);

/*
 *-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------
 */
#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
/* MPATHF_RETAIN_ATTACHED_HW_HANDLER no longer has any effect */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
#define MPATHF_DELAY_PG_SWITCH 7		/* Delay switching pg if it still has paths */
#define MPATHF_NEED_PG_SWITCH 8			/* Need to switch pgs after the delay has ended */

static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
	bool r = test_bit(MPATHF_bit, &m->flags);

	if (r) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		r = test_bit(MPATHF_bit, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return r;
}

/*
 *-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------
 */
static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;

	pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		atomic_set(&m->nr_valid_paths, 0);
		INIT_WORK(&m->trigger_event, trigger_event);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;

		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
	}

	return m;
}
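/*
 * Allocation is deliberately two-stage: alloc_multipath() runs before the
 * feature arguments are parsed and alloc_multipath_stage2() runs after
 * parse_features(), once queue_mode is known (a "queue_mode bio" feature
 * switches the target to bio-based operation).
 */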
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE)
		m->queue_mode = DM_TYPE_REQUEST_BASED;
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		INIT_WORK(&m->process_queued_bios, process_queued_bios);

	dm_table_set_type(ti->table, m->queue_mode);

	/*
	 * Init fields that are only used when a scsi_dh is attached
	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
	 */
	set_bit(MPATHF_QUEUE_IO, &m->flags);
	atomic_set(&m->pg_init_in_progress, 0);
	atomic_set(&m->pg_init_count, 0);
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	init_waitqueue_head(&m->pg_init_wait);
	init_waitqueue_head(&m->probe_wait);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mutex_destroy(&m->work_mutex);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	void *bio_details = mpio + 1;
	return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	mpio->start_time_ns = 0;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}

/*
 *-----------------------------------------------
 * Path selection
 *-----------------------------------------------
 */
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}
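/*
 * Note: __pg_init_all_paths() above returns the number of path activations
 * it has in flight.  pg_init_in_progress is incremented once per work item
 * that was actually queued and is dropped again in pg_init_done().
 */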
static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	lockdep_assert_held(&m->lock);

	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned int bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
		goto failed;
	}

	/* Don't change PG until it has no remaining paths */
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Were we instructed to switch PG? */
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_all_pgs;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}
check_all_pgs:
	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed) {
					spin_lock_irqsave(&m->lock, flags);
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
					spin_unlock_irqrestore(&m->lock, flags);
				}
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}
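/*
 * Worked example for the two-pass loop in choose_pgpath() above: with PG1
 * marked 'bypassed' and PG2 out of valid paths, pass one (bypassed == 1)
 * skips PG1 and fails on PG2; pass two (bypassed == 0) falls back to PG1
 * and sets MPATHF_PG_INIT_DELAY_RETRY so the subsequent pg_init is
 * rate-limited.
 */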
/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
		      dm_table_device_name((m)->ti->table),		\
		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
		      dm_noflush_suspending((m)->ti))

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 */
static bool __must_push_back(struct multipath *m)
{
	return dm_noflush_suspending(m->ti);
}

static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&m->lock, flags);
	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (must_push_back_rq(m))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		pg_init_all_paths(m);
		return DM_MAPIO_DELAY_REQUEUE;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
				     BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		if (blk_queue_dying(q)) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_DELAY_REQUEUE;
		}

		/*
		 * blk-mq's SCHED_RESTART can cover this requeue, so we
		 * needn't deal with it by DELAY_REQUEUE. More importantly,
		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
		 * get the queue busy feedback (via BLK_STS_RESOURCE),
		 * otherwise I/O merging can suffer.
		 */
		return DM_MAPIO_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static void multipath_release_clone(struct request *clone,
				    union map_info *map_context)
{
	if (unlikely(map_context)) {
		/*
		 * A non-NULL map_context means the caller is still in the
		 * map method; must undo multipath_clone_and_map()
		 */
		struct dm_mpath_io *mpio = get_mpio(map_context);
		struct pgpath *pgpath = mpio->pgpath;

		if (pgpath && pgpath->pg->ps.type->end_io)
			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
						    &pgpath->path,
						    mpio->nr_bytes,
						    clone->io_start_time_ns);
	}

	blk_mq_free_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */

static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	/* Queue for the daemon to resubmit */
	bio_list_add(&m->queued_bios, bio);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	__multipath_queue_bio(m, bio);
	spin_unlock_irqrestore(&m->lock, flags);
}

static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
	struct pgpath *pgpath;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

	if (!pgpath) {
		spin_lock_irq(&m->lock);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			__multipath_queue_bio(m, bio);
			pgpath = ERR_PTR(-EAGAIN);
		}
		spin_unlock_irq(&m->lock);

	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		multipath_queue_bio(m, bio);
		pg_init_all_paths(m);
		return ERR_PTR(-EAGAIN);
	}

	return pgpath;
}

static int __multipath_map_bio(struct multipath *m, struct bio *bio,
			       struct dm_mpath_io *mpio)
{
	struct pgpath *pgpath = __map_bio(m, bio);

	if (IS_ERR(pgpath))
		return DM_MAPIO_SUBMITTED;

	if (!pgpath) {
		if (__must_push_back(m))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;

	if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
		mpio->start_time_ns = ktime_get_ns();

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      mpio->nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio);
	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}
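/*
 * process_queued_bios() drains m->queued_bios under the lock and remaps each
 * bio outside of it; dm_bio_restore() first undoes the device/offset rewrite
 * left behind by the previous mapping attempt.
 */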
static void process_queued_bios(struct work_struct *work)
{
	int r;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irq(&m->lock);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irq(&m->lock);
		return;
	}

	bio_list_merge_init(&bios, &m->queued_bios);

	spin_unlock_irq(&m->lock);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);

		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
		r = __multipath_map_bio(m, bio, mpio);
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			submit_bio_noacct(bio);
			break;
		case DM_MAPIO_SUBMITTED:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
			    bool save_old_value, const char *caller)
{
	unsigned long flags;
	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
	const char *dm_dev_name = dm_table_device_name(m->ti->table);

	DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
		dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);

	spin_lock_irqsave(&m->lock, flags);

	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);

	if (save_old_value) {
		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
			      dm_dev_name);
		} else
			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
	} else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
		/* due to "fail_if_no_path" message, need to honor it. */
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}
	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);

	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
		dm_dev_name, __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
		dm_noflush_suspending(m->ti));

	spin_unlock_irqrestore(&m->lock, flags);

	if (!f_queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}

/*
 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 * process any queued I/O.
 */
static void queue_if_no_path_timeout_work(struct timer_list *t)
{
	struct multipath *m = timer_container_of(m, t, nopath_timer);

	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
	       dm_table_device_name(m->ti->table));
	queue_if_no_path(m, false, false, __func__);
}
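/*
 * When queue_if_no_path_timeout_secs (presumably surfaced as a module
 * parameter) is non-zero, enable_nopath_timeout() arms nopath_timer as soon
 * as the last valid path disappears while queue_if_no_path is set, bounding
 * how long I/O may remain queued with no usable path.
 */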
/*
 * Enable the queue_if_no_path timeout if necessary.
 * Called with m->lock held.
 */
static void enable_nopath_timeout(struct multipath *m)
{
	unsigned long queue_if_no_path_timeout =
		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;

	lockdep_assert_held(&m->lock);

	if (queue_if_no_path_timeout > 0 &&
	    atomic_read(&m->nr_valid_paths) == 0 &&
	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		mod_timer(&m->nopath_timer,
			  jiffies + queue_if_no_path_timeout);
	}
}

static void disable_nopath_timeout(struct multipath *m)
{
	timer_delete_sync(&m->nopath_timer);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*
 *---------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------
 */
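/*
 * Illustrative example only: a one-group, two-path table using round-robin
 * with no features and no hardware handler might look like
 *
 *   0 1024 multipath 0 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
 *
 * i.e. 0 feature args, 0 hw handler args, 1 PG, initial PG 1, selector
 * "round-robin" with 0 selector args, then 2 paths with 1 per-path selector
 * arg (round-robin's repeat count) each.
 */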
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned int ps_argc;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
			 const char **attached_handler_name, char **error)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int r;

	if (*attached_handler_name) {
		/*
		 * Clear any hw_handler_params associated with a
		 * handler that isn't already attached.
		 */
		if (m->hw_handler_name && strcmp(*attached_handler_name,
						 m->hw_handler_name)) {
			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}

		/*
		 * Reset hw_handler_name to match the attached handler
		 *
		 * NB. This modifies the table line to show the actual
		 * handler instead of the original table passed in.
		 */
		kfree(m->hw_handler_name);
		m->hw_handler_name = *attached_handler_name;
		*attached_handler_name = NULL;
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r < 0) {
			*error = "error attaching hardware handler";
			return r;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				*error = "unable to set hardware handler parameters";
				return r;
			}
		}
	}

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q;
	const char *attached_handler_name = NULL;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	q = bdev_get_queue(p->path.dev->bdev);
	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
	if (attached_handler_name || m->hw_handler_name) {
		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
		kfree(attached_handler_name);
		if (r) {
			dm_put_device(ti, p->path.dev);
			goto bad;
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;
bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static const struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned int i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}
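/*
 * When extra hardware handler arguments are given, parse_hw_handler() packs
 * them into hw_handler_params as consecutive NUL-terminated strings: first
 * the argument count, then each argument (sprintf() terminates each field
 * and the write pointer is advanced past the NUL).
 */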
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned int hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned int argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false, __func__);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			/* no longer has any effect */
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq") ||
				 !strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}
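/*
 * Example feature blocks (illustrative): "3 queue_if_no_path pg_init_retries 5"
 * or "2 queue_mode bio" - the leading count covers every word that follows,
 * including the values of two-word features.
 */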
static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* target arguments */
	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned int pg_count = 0;
	unsigned int next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	spin_lock_irq(&m->lock);
	enable_nopath_timeout(m);
	spin_unlock_irq(&m->lock);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	finish_wait(&m->pg_init_wait, &wait);
}
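/*
 * flush_multipath_work() briefly sets MPATHF_PG_INIT_DISABLED so that no new
 * pg_init gets scheduled while the handler workqueue is flushed and in-flight
 * activations are waited for; the bit is cleared again before returning.
 */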
static void flush_multipath_work(struct multipath *m)
{
	if (m->hw_handler_name) {
		if (!atomic_read(&m->pg_init_in_progress))
			goto skip;

		spin_lock_irq(&m->lock);
		if (atomic_read(&m->pg_init_in_progress) &&
		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
			spin_unlock_irq(&m->lock);

			flush_workqueue(kmpath_handlerd);
			multipath_wait_for_pg_init_completion(m);

			spin_lock_irq(&m->lock);
			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
		}
		spin_unlock_irq(&m->lock);
	}
skip:
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		flush_work(&m->process_queued_bios);
	flush_work(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	disable_nopath_timeout(m);
	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("%s: Failing path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

	queue_work(dm_mpath_wq, &m->trigger_event);

	enable_nopath_timeout(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	struct multipath *m = pgpath->pg->m;
	unsigned int nr_valid_paths;

	spin_lock_irq(&m->lock);

	if (pgpath->is_active)
		goto out;

	DMWARN("%s: Reinstating path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irq(&m->lock);
	if (run_queue) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	if (pgpath->is_active)
		disable_nopath_timeout(m);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, dev_t dev, action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev->bdev->bd_dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed, bool can_be_delayed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	if (can_be_delayed && test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
		set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
	else {
		m->current_pgpath = NULL;
		m->current_pg = NULL;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}
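/*
 * Note that switch_pg_num() below clears 'bypassed' on every PG while
 * scanning for the requested group, so a "switch_group" message also
 * re-enables groups that were previously disabled.
 */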
/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned int pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to %s", __func__);
		return -EINVAL;
	}

	spin_lock_irq(&m->lock);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
			set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
		else {
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
		m->next_pg = pg;
	}
	spin_unlock_irq(&m->lock);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned int pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed, true);
	return 0;
}
/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s Error %d.",
		      m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true, false);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = true;
		fallthrough;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still ongoing */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	process_queued_io_list(m);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    blk_status_t error, union map_info *map_context)
{
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath = mpio->pgpath;
	int r = DM_ENDIO_DONE;

	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones. If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	if (error && blk_path_error(error)) {
		struct multipath *m = ti->private;

		if (error == BLK_STS_RESOURCE)
			r = DM_ENDIO_DELAY_REQUEUE;
		else
			r = DM_ENDIO_REQUEUE;

		if (pgpath)
			fail_path(pgpath);

		if (!atomic_read(&m->nr_valid_paths) &&
		    !must_push_back_rq(m)) {
			if (error == BLK_STS_IOERR)
				dm_report_EIO(m);
			/* complete with the original error */
			r = DM_ENDIO_DONE;
		}
	}

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 clone->io_start_time_ns);
	}

	return r;
}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
				blk_status_t *error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath = mpio->pgpath;
	unsigned long flags;
	int r = DM_ENDIO_DONE;

	if (!*error || !blk_path_error(*error))
		goto done;

	if (pgpath)
		fail_path(pgpath);

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (__must_push_back(m)) {
				r = DM_ENDIO_REQUEUE;
			} else {
				dm_report_EIO(m);
				*error = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&m->lock, flags);
			goto done;
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	multipath_queue_bio(m, clone);
	r = DM_ENDIO_INCOMPLETE;
done:
	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 (mpio->start_time_ns ?:
					  dm_start_time_ns_from_clone(clone)));
	}

	return r;
}
/*
 * Suspend with flush can't complete until all the I/O is processed
 * so if the last path fails we must error any remaining I/O.
 * - Note that if the freeze_bdev fails while suspending, the
 *   queue_if_no_path state is lost - userspace should reset it.
 * Otherwise, during noflush suspend, queue_if_no_path will not change.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	spin_lock_irq(&m->lock);
	m->is_suspending = true;
	spin_unlock_irq(&m->lock);
	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
		queue_if_no_path(m, false, true, __func__);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	spin_lock_irq(&m->lock);
	m->is_suspending = false;
	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}

	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
		dm_table_device_name(m->ti->table), __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));

	spin_unlock_irq(&m->lock);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *             [A|D|E num_ps_status_args [ps_status_args]*
 *              num_paths num_selector_args
 *              [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
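/*
 * For STATUSTYPE_TABLE the feature count emitted below is computed as: one
 * word for queue_if_no_path, plus two words each (keyword + value) for
 * pg_init_retries, pg_init_delay_msecs and a non-default queue_mode.
 */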
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned int status_flags, char *result, unsigned int maxlen)
{
	int sz = 0, pg_counter, pgpath_counter;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned int pg_num;
	char state;

	spin_lock_irq(&m->lock);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch (m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			default:
				WARN_ON_ONCE(true);
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
						&p->path, type, result + sz,
						maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
						&p->path, type, result + sz,
						maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_IMA:
		sz = 0; /* reset the result pointer */

		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);

		pg_counter = 0;
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */
			DMEMIT(",pg_state_%d=%c", pg_counter, state);
			DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
			DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);

			pgpath_counter = 0;
			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
				       pg_counter, pgpath_counter, p->path.dev->name,
				       pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
				       pg_counter, pgpath_counter, p->fail_count);
				if (pg->ps.type->status) {
					DMEMIT(",path_selector_status_%d_%d=",
					       pg_counter, pgpath_counter);
					sz += pg->ps.type->status(&pg->ps, &p->path,
								  type, result + sz,
								  maxlen - sz);
				}
				pgpath_counter++;
			}
			pg_counter++;
		}
		DMEMIT(";");
		break;
	}

	spin_unlock_irq(&m->lock);
}
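/*
 * Messages understood below (sent e.g. via "dmsetup message <dev> 0 ..."):
 * "queue_if_no_path", "fail_if_no_path", "disable_group <#>",
 * "enable_group <#>", "switch_group <#>", "fail_path <dev>" and
 * "reinstate_path <dev>", where <dev> is resolved by dm_devt_from_path().
 */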
static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
			     char *result, unsigned int maxlen)
{
	int r = -EINVAL;
	dev_t dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false, __func__);
			spin_lock_irq(&m->lock);
			enable_nopath_timeout(m);
			spin_unlock_irq(&m->lock);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false, __func__);
			disable_nopath_timeout(m);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_devt_from_path(argv[1], &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

/*
 * Perform a minimal read from the given path to find out whether the
 * path still works.  If a path error occurs, fail it.
 */
static int probe_path(struct pgpath *pgpath)
{
	struct block_device *bdev = pgpath->path.dev->bdev;
	unsigned int read_size = bdev_logical_block_size(bdev);
	struct page *page;
	struct bio *bio;
	blk_status_t status;
	int r = 0;

	if (WARN_ON_ONCE(read_size > PAGE_SIZE))
		return -EINVAL;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Perform a minimal read: Sector 0, length read_size */
	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	if (!bio) {
		r = -ENOMEM;
		goto out;
	}

	bio->bi_iter.bi_sector = 0;
	__bio_add_page(bio, page, read_size, 0);
	submit_bio_wait(bio);
	status = bio->bi_status;
	bio_put(bio);

	if (status && blk_path_error(status))
		fail_path(pgpath);

out:
	__free_page(page);
	return r;
}

/*
 * Perform a minimal read from the given path to find out whether the
 * path still works. If a path error occurs, fail it.
 */
static int probe_path(struct pgpath *pgpath)
{
	struct block_device *bdev = pgpath->path.dev->bdev;
	unsigned int read_size = bdev_logical_block_size(bdev);
	struct page *page;
	struct bio *bio;
	blk_status_t status;
	int r = 0;

	if (WARN_ON_ONCE(read_size > PAGE_SIZE))
		return -EINVAL;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Perform a minimal read: Sector 0, length read_size */
	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	if (!bio) {
		r = -ENOMEM;
		goto out;
	}

	bio->bi_iter.bi_sector = 0;
	__bio_add_page(bio, page, read_size, 0);
	submit_bio_wait(bio);
	status = bio->bi_status;
	bio_put(bio);

	if (status && blk_path_error(status))
		fail_path(pgpath);

out:
	__free_page(page);
	return r;
}

/*
 * Probe all active paths in current_pg to find out whether they still work.
 * Fail all paths that do not work.
 *
 * Return -ENOTCONN if no valid path is left (even outside of current_pg). We
 * cannot probe paths in other pgs without switching current_pg, so if valid
 * paths are only in different pgs, they may or may not work. Additionally
 * we should not probe paths in a pathgroup that is in the process of
 * initializing. Userspace can submit a request and we'll switch to and wait
 * for the pathgroup to be initialized. If the request fails, userspace may
 * need to probe again.
 */
static int probe_active_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	struct priority_group *pg = NULL;
	int r = 0;

	spin_lock_irq(&m->lock);
	if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) {
		wait_event_lock_irq(m->probe_wait,
				    !test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags),
				    m->lock);
		/*
		 * If we waited because a probe was already in progress,
		 * and it probed the current active pathgroup, don't
		 * reprobe. Just report whether any valid paths remain.
		 */
		if (m->current_pg == m->last_probed_pg)
			goto skip_probe;
	}
	if (!m->current_pg || m->is_suspending ||
	    test_bit(MPATHF_QUEUE_IO, &m->flags))
		goto skip_probe;
	set_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
	pg = m->last_probed_pg = m->current_pg;
	spin_unlock_irq(&m->lock);

	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pg != READ_ONCE(m->current_pg) ||
		    READ_ONCE(m->is_suspending))
			goto out;
		if (!pgpath->is_active)
			continue;

		r = probe_path(pgpath);
		if (r < 0)
			goto out;
	}

out:
	spin_lock_irq(&m->lock);
	clear_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
	if (test_and_clear_bit(MPATHF_NEED_PG_SWITCH, &m->flags)) {
		m->current_pgpath = NULL;
		m->current_pg = NULL;
	}
skip_probe:
	if (r == 0 && !atomic_read(&m->nr_valid_paths))
		r = -ENOTCONN;
	spin_unlock_irq(&m->lock);
	if (pg)
		wake_up(&m->probe_wait);
	return r;
}
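
/*
 * Hypothetical userspace sketch (the device node name is illustrative):
 * the probe above is reached through the DM_MPATH_PROBE_PATHS ioctl on
 * the mapped device, roughly:
 *
 *	int fd = open("/dev/mapper/mpath0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, DM_MPATH_PROBE_PATHS) < 0)
 *		perror("DM_MPATH_PROBE_PATHS");
 *
 * An errno of ENOTCONN indicates that no valid path was left anywhere.
 */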

static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev,
				   unsigned int cmd, unsigned long arg,
				   bool *forward)
{
	struct multipath *m = ti->private;
	struct pgpath *pgpath;
	int r;

	if (_IOC_TYPE(cmd) == DM_IOCTL) {
		*forward = false;
		switch (cmd) {
		case DM_MPATH_PROBE_PATHS:
			return probe_active_paths(m);
		default:
			return -ENOTTY;
		}
	}

	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, 0);

	if (pgpath) {
		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
			*bdev = pgpath->path.dev->bdev;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		r = -EIO;
		spin_lock_irq(&m->lock);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		spin_unlock_irq(&m->lock);
	}

	if (r == -ENOTCONN) {
		if (!READ_ONCE(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		spin_lock_irq(&m->lock);
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			(void) __pg_init_all_paths(m);
		spin_unlock_irq(&m->lock);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != bdev_nr_sectors((*bdev)))
		return 1;
	return r;
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we mapped the I/Os now, they would wait on the
 * underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths)) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			spin_unlock_irqrestore(&m->lock, flags);
			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	/* Guess which priority_group will be used at next mapping time */
	pg = READ_ONCE(m->current_pg);
	next_pg = READ_ONCE(m->next_pg);
	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid triggering
		 * pg_init just by a busy check.
		 * So we don't know whether the underlying devices we will
		 * be using at next mapping time are busy or not. Just try
		 * mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;
	}

	return busy;
}

/*
 *---------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------
 */
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 15, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
		    DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ATOMIC_WRITES,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r = -ENOMEM;

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading the existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the path of
	 * the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		goto bad_alloc_kmpath_handlerd;
	}

	dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
	if (!dm_mpath_wq) {
		DMERR("failed to create workqueue dm_mpath_wq");
		goto bad_alloc_dm_mpath_wq;
	}

	r = dm_register_target(&multipath_target);
	if (r < 0)
		goto bad_register_target;

	return 0;

bad_register_target:
	destroy_workqueue(dm_mpath_wq);
bad_alloc_dm_mpath_wq:
	destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(dm_mpath_wq);
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
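
/*
 * Illustrative runtime usage: with the parameter exposed read-write
 * above (mode 0644), a 90 second timeout could be set via sysfs, e.g.
 *
 *	echo 90 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 *
 * (assuming the module appears as dm_multipath in sysfs).
 */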

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");