Lines matching refs: q

62 struct request_queue *q = rq->q; in elv_iosched_allow_bio_merge() local
63 struct elevator_queue *e = q->elevator; in elv_iosched_allow_bio_merge()
66 return e->type->ops.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
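
These first hits (lines 62-66) already show the dispatch pattern that repeats through the file: q->elevator holds the active scheduler instance, and scheduler behaviour is reached only through the e->type->ops table. A minimal sketch of the surrounding function, reassembled from the fragments; falling back to "merge allowed" when the hook is absent is an assumption that matches the usual convention for optional elevator hooks:

    /* Sketch: gate a bio/request merge on the active scheduler's
     * allow_merge hook; treat a missing hook as "allowed". */
    static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
    {
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.allow_merge)
            return e->type->ops.allow_merge(q, rq, bio);
        return true;    /* assumed fallback */
    }
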
123 struct elevator_queue *elevator_alloc(struct request_queue *q, in elevator_alloc() argument
128 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); in elevator_alloc()
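
elevator_alloc (lines 123-128) allocates the per-queue scheduler state zeroed and NUMA-local: kzalloc_node() places the elevator_queue on q->node, the queue's home memory node. A hedged sketch of that step; the second parameter (taken here to be the chosen struct elevator_type *e) is truncated in the listing, and the field setup after the allocation is an assumption about the elided lines:

    /* Sketch: NUMA-local, zero-initialized elevator_queue. */
    struct elevator_queue *eq;

    eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
    if (!eq)
        return NULL;
    eq->type = e;           /* assumed: bind the chosen elevator_type */
    hash_init(eq->hash);    /* assumed: back-merge hash, used further down */
    return eq;
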
152 static void elevator_exit(struct request_queue *q) in elevator_exit() argument
154 struct elevator_queue *e = q->elevator; in elevator_exit()
156 lockdep_assert_held(&q->elevator_lock); in elevator_exit()
158 ioc_clear_queue(q); in elevator_exit()
161 blk_mq_exit_sched(q, e); in elevator_exit()
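
elevator_exit (lines 152-161) pins down the teardown order: every caller must hold q->elevator_lock (lockdep_assert_held documents this and enforces it under lockdep), io_context state is dropped first, and only then is the scheduler itself dismantled. Reassembled from the fragments above:

    /* Teardown order, as visible in lines 152-161. */
    static void elevator_exit(struct request_queue *q)
    {
        struct elevator_queue *e = q->elevator;

        lockdep_assert_held(&q->elevator_lock);

        ioc_clear_queue(q);         /* drop icq state before the sched dies */
        blk_mq_exit_sched(q, e);    /* scheduler exit + per-hctx cleanup */
    }
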
171 void elv_rqhash_del(struct request_queue *q, struct request *rq) in elv_rqhash_del() argument
178 void elv_rqhash_add(struct request_queue *q, struct request *rq) in elv_rqhash_add() argument
180 struct elevator_queue *e = q->elevator; in elv_rqhash_add()
188 void elv_rqhash_reposition(struct request_queue *q, struct request *rq) in elv_rqhash_reposition() argument
191 elv_rqhash_add(q, rq); in elv_rqhash_reposition()
194 struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) in elv_rqhash_find() argument
196 struct elevator_queue *e = q->elevator; in elv_rqhash_find()
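
The elv_rqhash_* family (lines 171-196) keeps a hash of requests keyed by where each request ends, so an incoming bio can be back-merged in O(1): elv_rqhash_find(q, bio->bi_iter.bi_sector) at line 301 looks for a request whose end sector equals the bio's start. A small, runnable userspace model of that idea; struct rq, the table size, and the hash function are illustrative stand-ins, not the kernel's types:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    struct rq {                      /* toy stand-in for struct request */
        sector_t pos, sectors;
        struct rq *hnext;            /* singly-linked hash chain */
    };

    #define RQ_HASH_SZ 64
    static struct rq *rq_hash[RQ_HASH_SZ];

    static sector_t rq_end(const struct rq *rq) { return rq->pos + rq->sectors; }
    static unsigned rq_bucket(sector_t s) { return (unsigned)(s % RQ_HASH_SZ); }

    /* elv_rqhash_add: index the request by its END sector. */
    static void rqhash_add(struct rq *rq)
    {
        unsigned b = rq_bucket(rq_end(rq));

        rq->hnext = rq_hash[b];
        rq_hash[b] = rq;
    }

    /* elv_rqhash_find: a bio starting at 'offset' can back-merge into
     * a request that ends exactly at 'offset'. */
    static struct rq *rqhash_find(sector_t offset)
    {
        struct rq *rq;

        for (rq = rq_hash[rq_bucket(offset)]; rq; rq = rq->hnext)
            if (rq_end(rq) == offset)
                return rq;
        return NULL;
    }

    int main(void)
    {
        struct rq a = { .pos = 100, .sectors = 8, .hnext = NULL };

        rqhash_add(&a);
        printf("bio@108 back-merges into rq@100: %s\n",
               rqhash_find(108) ? "yes" : "no");
        return 0;
    }
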
268 enum elv_merge elv_merge(struct request_queue *q, struct request **req, in elv_merge() argument
271 struct elevator_queue *e = q->elevator; in elv_merge()
280 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) in elv_merge()
286 if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { in elv_merge()
287 enum elv_merge ret = blk_try_merge(q->last_merge, bio); in elv_merge()
290 *req = q->last_merge; in elv_merge()
295 if (blk_queue_noxmerges(q)) in elv_merge()
301 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
311 return e->type->ops.request_merge(q, req, bio); in elv_merge()
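
elv_merge (lines 268-311) is a three-stage cascade: bail out when merging is disabled or the bio is unmergeable; retry the cached q->last_merge first, since merges cluster; unless extended merging is off (noxmerges), probe the back-merge hash; and only then fall through to the scheduler's own request_merge hook, which typically finds front merges via its sorted structures. A condensed paraphrase of that control flow (the discard-merge special case is elided):

    /* Paraphrase of the elv_merge() decision cascade. */
    enum elv_merge elv_merge_sketch(struct request_queue *q,
                                    struct request **req, struct bio *bio)
    {
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
            return ELEVATOR_NO_MERGE;

        /* 1. Hot path: whatever merged last time tends to merge again. */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
            enum elv_merge ret = blk_try_merge(q->last_merge, bio);

            if (ret != ELEVATOR_NO_MERGE) {
                *req = q->last_merge;
                return ret;
            }
        }

        if (blk_queue_noxmerges(q))
            return ELEVATOR_NO_MERGE;

        /* 2. O(1) back-merge lookup in the rq hash (see the model above). */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
            *req = __rq;
            return ELEVATOR_BACK_MERGE;
        }

        /* 3. Scheduler-specific search, typically for front merges. */
        if (e->type->ops.request_merge)
            return e->type->ops.request_merge(q, req, bio);
        return ELEVATOR_NO_MERGE;
    }
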
324 bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq, in elv_attempt_insert_merge() argument
330 if (blk_queue_nomerges(q)) in elv_attempt_insert_merge()
336 if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) { in elv_attempt_insert_merge()
341 if (blk_queue_noxmerges(q)) in elv_attempt_insert_merge()
349 __rq = elv_rqhash_find(q, blk_rq_pos(rq)); in elv_attempt_insert_merge()
350 if (!__rq || !blk_attempt_req_merge(q, __rq, rq)) in elv_attempt_insert_merge()
362 void elv_merged_request(struct request_queue *q, struct request *rq, in elv_merged_request() argument
365 struct elevator_queue *e = q->elevator; in elv_merged_request()
368 e->type->ops.request_merged(q, rq, type); in elv_merged_request()
371 elv_rqhash_reposition(q, rq); in elv_merged_request()
373 q->last_merge = rq; in elv_merged_request()
376 void elv_merge_requests(struct request_queue *q, struct request *rq, in elv_merge_requests() argument
379 struct elevator_queue *e = q->elevator; in elv_merge_requests()
382 e->type->ops.requests_merged(q, rq, next); in elv_merge_requests()
384 elv_rqhash_reposition(q, rq); in elv_merge_requests()
385 q->last_merge = rq; in elv_merge_requests()
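
Both merge notification hooks (lines 362-385) end by caching the surviving request as q->last_merge for the hot path above, and by repositioning it in the back-merge hash whenever the merge moved its end sector (growing a request past its old end changes the key the hash indexes on). Extending the toy model from earlier; the kernel needs no old key because its hlist nodes unlink in O(1), whereas the singly-linked toy chain must walk its old bucket:

    /* elv_rqhash_reposition(): delete + re-add under the new end sector. */
    static void rqhash_reposition(struct rq *rq, sector_t old_end)
    {
        struct rq **pp = &rq_hash[rq_bucket(old_end)];

        while (*pp && *pp != rq)
            pp = &(*pp)->hnext;
        if (*pp)
            *pp = rq->hnext;    /* unlink from the stale chain */
        rqhash_add(rq);         /* re-index by the new end sector */
    }
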
388 struct request *elv_latter_request(struct request_queue *q, struct request *rq) in elv_latter_request() argument
390 struct elevator_queue *e = q->elevator; in elv_latter_request()
393 return e->type->ops.next_request(q, rq); in elv_latter_request()
398 struct request *elv_former_request(struct request_queue *q, struct request *rq) in elv_former_request() argument
400 struct elevator_queue *e = q->elevator; in elv_former_request()
403 return e->type->ops.former_request(q, rq); in elv_former_request()
457 static int elv_register_queue(struct request_queue *q, in elv_register_queue() argument
463 error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched"); in elv_register_queue()
480 blk_mq_sched_reg_debugfs(q); in elv_register_queue()
486 static void elv_unregister_queue(struct request_queue *q, in elv_unregister_queue() argument
494 blk_mq_sched_unreg_debugfs(q); in elv_unregister_queue()
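
elv_register_queue (lines 457-480) is what creates /sys/block/<disk>/queue/iosched/: the elevator's kobject is parented under the disk's queue_kobj with the fixed name "iosched", and the scheduler's debugfs entries are registered afterwards; elv_unregister_queue undoes both in reverse order. A kernel-flavored sketch of the visible registration steps; attribute creation, the optional uevent, and error unwinding are elided:

    /* Sketch: publish the scheduler in sysfs, then debugfs. */
    int error;

    error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
    if (error)
        return error;
    blk_mq_sched_reg_debugfs(q);    /* per-scheduler debugfs entries */
    return 0;
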
562 static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx) in elevator_switch() argument
567 WARN_ON_ONCE(q->mq_freeze_depth == 0); in elevator_switch()
568 lockdep_assert_held(&q->elevator_lock); in elevator_switch()
576 blk_mq_quiesce_queue(q); in elevator_switch()
578 if (q->elevator) { in elevator_switch()
579 ctx->old = q->elevator; in elevator_switch()
580 elevator_exit(q); in elevator_switch()
584 ret = blk_mq_init_sched(q, new_e, &ctx->res); in elevator_switch()
587 ctx->new = q->elevator; in elevator_switch()
589 blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q); in elevator_switch()
590 q->elevator = NULL; in elevator_switch()
591 q->nr_requests = q->tag_set->queue_depth; in elevator_switch()
593 blk_add_trace_msg(q, "elv switch: %s", ctx->name); in elevator_switch()
596 blk_mq_unquiesce_queue(q); in elevator_switch()
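
elevator_switch (lines 562-596) is the heart of the file. Its preconditions are asserted up front: the caller has already frozen the queue (WARN_ON_ONCE fires if mq_freeze_depth is zero) and holds elevator_lock. The queue is then quiesced so no hctx sits inside ->queue_rq while the old scheduler is exited and the new one initialized; switching to "none" instead clears the SQ_SCHED flag and restores nr_requests to the tag set's full depth. A paraphrase of that ordering; deriving new_e from ctx->type is an assumption about the elided lines:

    /* Paraphrase of the switch protocol.  Freeze is the caller's job;
     * the quiesce window makes exit-old/init-new safe against dispatch. */
    static int elevator_switch_sketch(struct request_queue *q,
                                      struct elv_change_ctx *ctx)
    {
        struct elevator_type *new_e = ctx->type;   /* assumed; NULL = "none" */
        int ret = 0;

        WARN_ON_ONCE(q->mq_freeze_depth == 0);
        lockdep_assert_held(&q->elevator_lock);

        blk_mq_quiesce_queue(q);

        if (q->elevator) {
            ctx->old = q->elevator;
            elevator_exit(q);               /* tear down the old scheduler */
        }

        if (new_e) {
            ret = blk_mq_init_sched(q, new_e, &ctx->res);
            if (!ret)
                ctx->new = q->elevator;
        } else {
            blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
            q->elevator = NULL;
            q->nr_requests = q->tag_set->queue_depth;
        }
        blk_add_trace_msg(q, "elv switch: %s", ctx->name);

        blk_mq_unquiesce_queue(q);
        return ret;
    }
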
609 struct request_queue *q) in elv_exit_and_release() argument
614 memflags = blk_mq_freeze_queue(q); in elv_exit_and_release()
615 mutex_lock(&q->elevator_lock); in elv_exit_and_release()
616 e = q->elevator; in elv_exit_and_release()
617 elevator_exit(q); in elv_exit_and_release()
618 mutex_unlock(&q->elevator_lock); in elv_exit_and_release()
619 blk_mq_unfreeze_queue(q, memflags); in elv_exit_and_release()
621 blk_mq_free_sched_res(&ctx->res, ctx->type, q->tag_set); in elv_exit_and_release()
626 static int elevator_change_done(struct request_queue *q, in elevator_change_done() argument
637 elv_unregister_queue(q, ctx->old); in elevator_change_done()
638 blk_mq_free_sched_res(&res, ctx->old->type, q->tag_set); in elevator_change_done()
642 ret = elv_register_queue(q, ctx->new, !ctx->no_uevent); in elevator_change_done()
644 elv_exit_and_release(ctx, q); in elevator_change_done()
652 static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx) in elevator_change() argument
655 struct blk_mq_tag_set *set = q->tag_set; in elevator_change()
661 ret = blk_mq_alloc_sched_res(q, ctx->type, &ctx->res, in elevator_change()
667 memflags = blk_mq_freeze_queue(q); in elevator_change()
677 blk_mq_cancel_work_sync(q); in elevator_change()
678 mutex_lock(&q->elevator_lock); in elevator_change()
679 if (!(q->elevator && elevator_match(q->elevator->type, ctx->name))) in elevator_change()
680 ret = elevator_switch(q, ctx); in elevator_change()
681 mutex_unlock(&q->elevator_lock); in elevator_change()
682 blk_mq_unfreeze_queue(q, memflags); in elevator_change()
684 ret = elevator_change_done(q, ctx); in elevator_change()
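
elevator_change (lines 652-684) shows the allocation discipline around the switch: scheduler resources are allocated before the queue is frozen, presumably because a sleeping allocation under queue freeze risks deadlocking through reclaim writeback to that same queue; then freeze, cancel pending queue work, and switch under elevator_lock only when the requested scheduler is not already the active one. Registration and freeing of whichever side lost happen after unfreeze, in elevator_change_done. A compressed sketch; the trailing argument of the alloc call is truncated in the listing and the if (!ret) gating of the final call is an assumption:

    /* Sketch: allocate before freeze, switch under the lock,
     * finish (sysfs/debugfs + frees) after unfreeze. */
    ret = blk_mq_alloc_sched_res(q, ctx->type, &ctx->res,
                                 set->nr_hw_queues);  /* final arg assumed */
    if (ret)
        return ret;

    memflags = blk_mq_freeze_queue(q);
    blk_mq_cancel_work_sync(q);             /* no stale runs mid-switch */
    mutex_lock(&q->elevator_lock);
    if (!(q->elevator && elevator_match(q->elevator->type, ctx->name)))
        ret = elevator_switch(q, ctx);      /* skip if already active */
    mutex_unlock(&q->elevator_lock);
    blk_mq_unfreeze_queue(q, memflags);

    if (!ret)                               /* assumed gating */
        ret = elevator_change_done(q, ctx);
    return ret;
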
699 void elv_update_nr_hw_queues(struct request_queue *q, in elv_update_nr_hw_queues() argument
702 struct blk_mq_tag_set *set = q->tag_set; in elv_update_nr_hw_queues()
705 WARN_ON_ONCE(q->mq_freeze_depth == 0); in elv_update_nr_hw_queues()
707 if (ctx->type && !blk_queue_dying(q) && blk_queue_registered(q)) { in elv_update_nr_hw_queues()
708 mutex_lock(&q->elevator_lock); in elv_update_nr_hw_queues()
710 ret = elevator_switch(q, ctx); in elv_update_nr_hw_queues()
711 mutex_unlock(&q->elevator_lock); in elv_update_nr_hw_queues()
713 blk_mq_unfreeze_queue_nomemrestore(q); in elv_update_nr_hw_queues()
715 WARN_ON_ONCE(elevator_change_done(q, ctx)); in elv_update_nr_hw_queues()
728 void elevator_set_default(struct request_queue *q) in elevator_set_default() argument
737 blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q); in elevator_set_default()
739 if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT) in elevator_set_default()
751 if ((q->nr_hw_queues == 1 || in elevator_set_default()
752 blk_mq_is_shared_tags(q->tag_set->flags))) { in elevator_set_default()
753 err = elevator_change(q, &ctx); in elevator_set_default()
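
elevator_set_default (lines 728-753) encodes the boot-time policy: clear NO_ELV_SWITCH so switching becomes legal again, respect tag sets that opt out via BLK_MQ_F_NO_SCHED_BY_DEFAULT, and install the default scheduler only for single-hardware-queue devices or shared-tag sets; other multiqueue devices boot with "none". A sketch of the check; "mq-deadline" as the default name matches current kernels but is an assumption as far as these fragments go:

    /* Sketch: who gets a scheduler by default. */
    struct elv_change_ctx ctx = {
        .name       = "mq-deadline",    /* assumed default */
        .no_uevent  = true,
    };

    blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);

    if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
        return;

    if (q->nr_hw_queues == 1 || blk_mq_is_shared_tags(q->tag_set->flags))
        elevator_change(q, &ctx);       /* error handling elided */
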
761 void elevator_set_none(struct request_queue *q) in elevator_set_none() argument
768 err = elevator_change(q, &ctx); in elevator_set_none()
791 struct request_queue *q = disk->queue; in elv_iosched_store() local
792 struct blk_mq_tag_set *set = q->tag_set; in elv_iosched_store()
795 if (!blk_queue_registered(q)) in elv_iosched_store()
810 if (!blk_queue_no_elv_switch(q)) { in elv_iosched_store()
811 ret = elevator_change(q, &ctx); in elv_iosched_store()
826 struct request_queue *q = disk->queue; in elv_iosched_show() local
830 mutex_lock(&q->elevator_lock); in elv_iosched_show()
831 if (!q->elevator) { in elv_iosched_show()
835 cur = q->elevator->type; in elv_iosched_show()
848 mutex_unlock(&q->elevator_lock); in elv_iosched_show()
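
elv_iosched_show (lines 826-848) takes elevator_lock and prints the registered schedulers on one line with the active one bracketed, e.g. "none [mq-deadline] kyber bfq"; elv_iosched_store (lines 791-811) is the matching write path (writing a scheduler name into /sys/block/<dev>/queue/scheduler), rejected while the queue is unregistered or QUEUE_FLAG_NO_ELV_SWITCH is set, as it is during an nr_hw_queues update. A small runnable userspace reader for that format; the sda path is only an example:

    #include <stdio.h>
    #include <string.h>

    /* Print the active scheduler: the bracketed token in
     * /sys/block/<dev>/queue/scheduler. */
    int main(void)
    {
        char line[256], *l, *r;
        FILE *f = fopen("/sys/block/sda/queue/scheduler", "r"); /* example */

        if (!f)
            return 1;
        if (!fgets(line, sizeof(line), f)) {
            fclose(f);
            return 1;
        }
        fclose(f);

        l = strchr(line, '[');
        r = l ? strchr(l, ']') : NULL;
        if (!l || !r)
            return 1;
        *r = '\0';
        printf("active scheduler: %s\n", l + 1);
        return 0;
    }
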
853 struct request *elv_rb_former_request(struct request_queue *q, in elv_rb_former_request() argument
865 struct request *elv_rb_latter_request(struct request_queue *q, in elv_rb_latter_request() argument
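
elv_rb_former_request and elv_rb_latter_request (lines 853-865) are the generic neighbours on the sector-sorted rbtree that schedulers such as mq-deadline maintain: former is rb_prev of the request's node, latter is rb_next, each mapped back to the containing request. Reconstructed from the kernel's long-standing implementation (rb_entry_rq is the rb_node-to-request accessor), so the bodies are very likely along these lines:

    /* Likely bodies, reconstructed: */
    struct request *elv_rb_former_request(struct request_queue *q,
                                          struct request *rq)
    {
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
            return rb_entry_rq(rbprev);
        return NULL;
    }

    struct request *elv_rb_latter_request(struct request_queue *q,
                                          struct request *rq)
    {
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
            return rb_entry_rq(rbnext);
        return NULL;
    }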