--- blk-mq-sched.c (63064be150e4b1ba1e4af594ef5aa81adf21a52d)
+++ blk-mq-sched.c (e155b0c238b20f0a866f4334d292656665836c8a)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * blk-mq scheduling framework
  *
  * Copyright (C) 2016 Jens Axboe
  */
 #include <linux/kernel.h>
 #include <linux/module.h>

[... 505 unchanged lines hidden ...]

  out:
         percpu_ref_put(&q->q_usage_counter);
 }

 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
                                           struct blk_mq_hw_ctx *hctx,
                                           unsigned int hctx_idx)
 {
+        if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
+                hctx->sched_tags = q->shared_sbitmap_tags;
+                return 0;
+        }
+
         hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
                                                     q->nr_requests);

         if (!hctx->sched_tags)
                 return -ENOMEM;
         return 0;
 }

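The hunk above is the core of the change: when the tag set uses a shared sbitmap, a hardware context no longer allocates its own scheduler tag map and instead borrows the queue-wide shared_sbitmap_tags. A minimal userspace sketch of that sharing pattern, assuming made-up mock_tags/mock_hctx/mock_queue types and a sched_alloc_map() helper (none of these are kernel structures or functions):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Illustrative stand-ins for blk_mq_tags, blk_mq_hw_ctx and request_queue. */
struct mock_tags { unsigned int depth; };
struct mock_hctx { struct mock_tags *sched_tags; };
struct mock_queue {
        bool shared_sbitmap;
        struct mock_tags *shared_tags;          /* models q->shared_sbitmap_tags */
        struct mock_hctx hctx[4];
};

/* Models the new behaviour: share one map, or allocate one per hctx. */
static int sched_alloc_map(struct mock_queue *q, struct mock_hctx *hctx,
                           unsigned int depth)
{
        if (q->shared_sbitmap) {
                hctx->sched_tags = q->shared_tags;      /* no new allocation */
                return 0;
        }
        hctx->sched_tags = malloc(sizeof(*hctx->sched_tags));
        if (!hctx->sched_tags)
                return -1;
        hctx->sched_tags->depth = depth;
        return 0;
}

int main(void)
{
        struct mock_tags shared = { .depth = 256 };
        struct mock_queue q = { .shared_sbitmap = true, .shared_tags = &shared };

        for (int i = 0; i < 4; i++)
                sched_alloc_map(&q, &q.hctx[i], 128);

        /* With sharing enabled, every hctx ends up pointing at the same map. */
        for (int i = 0; i < 4; i++)
                printf("hctx %d sched_tags=%p\n", i, (void *)q.hctx[i].sched_tags);
        return 0;
}

Running it prints the same pointer four times, which is the invariant the later hunks rely on when they skip the per-hctx free in the shared case.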
+static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
+{
+        blk_mq_free_rq_map(queue->shared_sbitmap_tags);
+        queue->shared_sbitmap_tags = NULL;
+}
+
 /* called in queue's release handler, tagset has gone away */
-static void blk_mq_sched_tags_teardown(struct request_queue *q)
+static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
 {
         struct blk_mq_hw_ctx *hctx;
         int i;

         queue_for_each_hw_ctx(q, hctx, i) {
                 if (hctx->sched_tags) {
-                        blk_mq_free_rq_map(hctx->sched_tags, hctx->flags);
+                        if (!blk_mq_is_sbitmap_shared(q->tag_set->flags))
+                                blk_mq_free_rq_map(hctx->sched_tags);
                         hctx->sched_tags = NULL;
                 }
         }
+
+        if (blk_mq_is_sbitmap_shared(flags))
+                blk_mq_exit_sched_shared_sbitmap(q);
 }

 static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
 {
         struct blk_mq_tag_set *set = queue->tag_set;
-        int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
-        struct blk_mq_hw_ctx *hctx;
-        int ret, i;

         /*
          * Set initial depth at max so that we don't need to reallocate for
          * updating nr_requests.
          */
-        ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
-                                  &queue->sched_breserved_tags,
-                                  MAX_SCHED_RQ, set->reserved_tags,
-                                  set->numa_node, alloc_policy);
-        if (ret)
-                return ret;
+        queue->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set,
+                                                BLK_MQ_NO_HCTX_IDX,
+                                                MAX_SCHED_RQ);
+        if (!queue->shared_sbitmap_tags)
+                return -ENOMEM;

-        queue_for_each_hw_ctx(queue, hctx, i) {
-                hctx->sched_tags->bitmap_tags =
-                        &queue->sched_bitmap_tags;
-                hctx->sched_tags->breserved_tags =
-                        &queue->sched_breserved_tags;
-        }
-
         blk_mq_tag_update_sched_shared_sbitmap(queue);

         return 0;
 }

-static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
-{
-        sbitmap_queue_free(&queue->sched_bitmap_tags);
-        sbitmap_queue_free(&queue->sched_breserved_tags);
-}
-
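In the new blk_mq_init_sched_shared_sbitmap() the queue-wide map is allocated exactly once, at MAX_SCHED_RQ depth and under the BLK_MQ_NO_HCTX_IDX index, so a later change of nr_requests never has to reallocate it, and the removed per-hctx pointer fix-up loop is no longer needed. A rough userspace sketch of the "allocate at the maximum once, then only clamp the usable depth" idea; MAX_DEPTH, mock_map and the two helpers are hypothetical, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

#define MAX_DEPTH 2048          /* stand-in for MAX_SCHED_RQ */

/* Mock tag map: capacity is fixed at allocation time, only the usable depth moves. */
struct mock_map {
        unsigned int capacity;
        unsigned int depth;
};

static struct mock_map *map_alloc_max(void)
{
        struct mock_map *m = malloc(sizeof(*m));

        if (!m)
                return NULL;
        m->capacity = MAX_DEPTH;        /* allocate once at the maximum */
        m->depth = MAX_DEPTH;
        return m;
}

/* Models a later nr_requests update: no reallocation, just clamp the depth. */
static void map_update_depth(struct mock_map *m, unsigned int nr_requests)
{
        m->depth = nr_requests < m->capacity ? nr_requests : m->capacity;
}

int main(void)
{
        struct mock_map *m = map_alloc_max();

        if (!m)
                return 1;
        map_update_depth(m, 256);       /* e.g. the queue's nr_requests */
        printf("capacity=%u depth=%u\n", m->capacity, m->depth);
        free(m);
        return 0;
}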
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 {
+        unsigned int i, flags = q->tag_set->flags;
         struct blk_mq_hw_ctx *hctx;
         struct elevator_queue *eq;
-        unsigned int i;
         int ret;

         if (!e) {
                 q->elevator = NULL;
                 q->nr_requests = q->tag_set->queue_depth;
                 return 0;
         }

         /*
          * Default to double of smaller one between hw queue_depth and 128,
          * since we don't split into sync/async like the old code did.
          * Additionally, this is a per-hw queue depth.
          */
         q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                    BLKDEV_DEFAULT_RQ);

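As the comment says, the default scheduler depth is double the smaller of the hardware queue depth and BLKDEV_DEFAULT_RQ (128), applied per hardware queue. A small standalone illustration of that arithmetic; default_nr_requests() is a hypothetical helper, not a kernel function:

#include <stdio.h>

#define BLKDEV_DEFAULT_RQ 128   /* the "128" referred to in the comment above */

static unsigned int default_nr_requests(unsigned int queue_depth)
{
        unsigned int smaller = queue_depth < BLKDEV_DEFAULT_RQ ?
                               queue_depth : BLKDEV_DEFAULT_RQ;

        return 2 * smaller;     /* per hardware queue */
}

int main(void)
{
        /* A shallow device (depth 64) gets 128; a deep one (depth 1024) is capped at 256. */
        printf("%u %u\n", default_nr_requests(64), default_nr_requests(1024));
        return 0;
}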
-        queue_for_each_hw_ctx(q, hctx, i) {
-                ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
+        if (blk_mq_is_sbitmap_shared(flags)) {
+                ret = blk_mq_init_sched_shared_sbitmap(q);
                 if (ret)
-                        goto err_free_map_and_rqs;
+                        return ret;
         }

-        if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
-                ret = blk_mq_init_sched_shared_sbitmap(q);
+        queue_for_each_hw_ctx(q, hctx, i) {
+                ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
                 if (ret)
                         goto err_free_map_and_rqs;
         }

         ret = e->ops.init_sched(q, e);
         if (ret)
-                goto err_free_sbitmap;
+                goto err_free_map_and_rqs;

         blk_mq_debugfs_register_sched(q);

         queue_for_each_hw_ctx(q, hctx, i) {
                 if (e->ops.init_hctx) {
                         ret = e->ops.init_hctx(hctx, i);
                         if (ret) {
                                 eq = q->elevator;
                                 blk_mq_sched_free_rqs(q);
                                 blk_mq_exit_sched(q, eq);
                                 kobject_put(&eq->kobj);
                                 return ret;
                         }
                 }
                 blk_mq_debugfs_register_sched_hctx(q, hctx);
         }

         return 0;

-err_free_sbitmap:
-        if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
-                blk_mq_exit_sched_shared_sbitmap(q);
 err_free_map_and_rqs:
         blk_mq_sched_free_rqs(q);
-        blk_mq_sched_tags_teardown(q);
+        blk_mq_sched_tags_teardown(q, flags);
+
         q->elevator = NULL;
         return ret;
 }

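Because blk_mq_sched_tags_teardown() now takes the flags and releases the shared map itself, the separate err_free_sbitmap label could be dropped: the shared-map allocation fails before anything else is set up and simply returns, and every later failure unwinds through the single err_free_map_and_rqs label. A generic sketch of that single-cleanup-label pattern, with placeholder resources and a teardown that tolerates partially completed setup (none of the names below are kernel APIs):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Placeholder resources standing in for the shared map and a per-hctx map. */
struct ctx {
        void *shared_map;
        void *per_hctx_map;
};

/* Safe to call with partially completed setup: free(NULL) is a no-op. */
static void teardown(struct ctx *c)
{
        free(c->per_hctx_map);
        c->per_hctx_map = NULL;
        free(c->shared_map);
        c->shared_map = NULL;
}

static int init(struct ctx *c, bool fail_late)
{
        int ret;

        c->shared_map = malloc(16);
        if (!c->shared_map)
                return -1;              /* nothing else allocated yet: just return */

        c->per_hctx_map = malloc(16);
        if (!c->per_hctx_map) {
                ret = -1;
                goto err;
        }
        if (fail_late) {                /* models a later step such as init_sched failing */
                ret = -1;
                goto err;
        }
        return 0;
err:
        teardown(c);                    /* one label unwinds every allocation */
        return ret;
}

int main(void)
{
        struct ctx c = { 0 };
        int ret = init(&c, true);

        /* After the forced failure, both pointers have been unwound to NULL. */
        printf("init = %d, shared_map = %p\n", ret, c.shared_map);
        return 0;
}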
 /*
  * called in either blk_queue_cleanup or elevator_switch, tagset
  * is required for freeing requests
  */
 void blk_mq_sched_free_rqs(struct request_queue *q)
 {
         struct blk_mq_hw_ctx *hctx;
         int i;

-        queue_for_each_hw_ctx(q, hctx, i) {
-                if (hctx->sched_tags)
-                        blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+        if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
+                blk_mq_free_rqs(q->tag_set, q->shared_sbitmap_tags,
+                                BLK_MQ_NO_HCTX_IDX);
+        } else {
+                queue_for_each_hw_ctx(q, hctx, i) {
+                        if (hctx->sched_tags)
+                                blk_mq_free_rqs(q->tag_set,
+                                                hctx->sched_tags, i);
+                }
         }
 }

 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 {
         struct blk_mq_hw_ctx *hctx;
         unsigned int i;
         unsigned int flags = 0;

[... 4 unchanged lines hidden ...]

                         e->type->ops.exit_hctx(hctx, i);
                         hctx->sched_data = NULL;
                 }
                 flags = hctx->flags;
         }
         blk_mq_debugfs_unregister_sched(q);
         if (e->type->ops.exit_sched)
                 e->type->ops.exit_sched(e);
-        blk_mq_sched_tags_teardown(q);
-        if (blk_mq_is_sbitmap_shared(flags))
-                blk_mq_exit_sched_shared_sbitmap(q);
+        blk_mq_sched_tags_teardown(q, flags);
         q->elevator = NULL;
 }
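With the shared map in place, blk_mq_sched_free_rqs() frees the scheduler requests once for the whole queue (the BLK_MQ_NO_HCTX_IDX branch) and blk_mq_sched_tags_teardown() frees the map itself exactly once, while the per-hctx sched_tags pointers are only cleared, never freed. A minimal sketch of that ownership split, again with mock types rather than kernel structures:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define NR_HCTX 4

struct mock_tags { int dummy; };

struct mock_queue {
        bool shared;
        struct mock_tags *shared_tags;          /* owned by the queue when shared */
        struct mock_tags *hctx_tags[NR_HCTX];   /* owned per hctx otherwise */
};

/* Models blk_mq_sched_tags_teardown(): clear borrowed pointers, free owned maps. */
static void teardown(struct mock_queue *q)
{
        for (int i = 0; i < NR_HCTX; i++) {
                if (!q->shared)
                        free(q->hctx_tags[i]);  /* per-hctx map is owned here */
                q->hctx_tags[i] = NULL;         /* a shared map is only borrowed */
        }
        if (q->shared) {
                free(q->shared_tags);           /* freed exactly once */
                q->shared_tags = NULL;
        }
}

int main(void)
{
        struct mock_queue q = { .shared = true };

        q.shared_tags = malloc(sizeof(*q.shared_tags));
        for (int i = 0; i < NR_HCTX; i++)
                q.hctx_tags[i] = q.shared_tags; /* every hctx borrows the same map */

        teardown(&q);
        printf("shared_tags = %p\n", (void *)q.shared_tags);
        return 0;
}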