Lines Matching refs:request_queue
53 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
115 static bool blk_freeze_set_owner(struct request_queue *q, in blk_freeze_set_owner()
137 static bool blk_unfreeze_check_owner(struct request_queue *q) in blk_unfreeze_check_owner()
150 static bool blk_freeze_set_owner(struct request_queue *q, in blk_freeze_set_owner()
156 static bool blk_unfreeze_check_owner(struct request_queue *q) in blk_unfreeze_check_owner()
162 bool __blk_freeze_queue_start(struct request_queue *q, in __blk_freeze_queue_start()
181 void blk_freeze_queue_start(struct request_queue *q) in blk_freeze_queue_start()
188 void blk_mq_freeze_queue_wait(struct request_queue *q) in blk_mq_freeze_queue_wait()
194 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, in blk_mq_freeze_queue_wait_timeout()
203 void blk_mq_freeze_queue_nomemsave(struct request_queue *q) in blk_mq_freeze_queue_nomemsave()
210 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic) in __blk_mq_unfreeze_queue()
229 void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q) in blk_mq_unfreeze_queue_nomemrestore()
243 void blk_freeze_queue_start_non_owner(struct request_queue *q) in blk_freeze_queue_start_non_owner()
250 void blk_mq_unfreeze_queue_non_owner(struct request_queue *q) in blk_mq_unfreeze_queue_non_owner()
260 void blk_mq_quiesce_queue_nowait(struct request_queue *q) in blk_mq_quiesce_queue_nowait()
298 void blk_mq_quiesce_queue(struct request_queue *q) in blk_mq_quiesce_queue()
314 void blk_mq_unquiesce_queue(struct request_queue *q) in blk_mq_unquiesce_queue()
336 struct request_queue *q; in blk_mq_quiesce_tagset()
351 struct request_queue *q; in blk_mq_unquiesce_tagset()
362 void blk_mq_wake_waiters(struct request_queue *q) in blk_mq_wake_waiters()
372 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init()
404 struct request_queue *q = data->q; in blk_mq_rq_ctx_init()
487 struct request_queue *q = data->q; in __blk_mq_alloc_requests()
569 static struct request *blk_mq_rq_cache_fill(struct request_queue *q, in blk_mq_rq_cache_fill()
598 static struct request *blk_mq_alloc_cached_request(struct request_queue *q, in blk_mq_alloc_cached_request()
633 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, in blk_mq_alloc_request()
671 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, in blk_mq_alloc_request_hctx()
754 struct request_queue *q = rq->q; in blk_mq_finish_request()
771 struct request_queue *q = rq->q; in __blk_mq_free_request()
792 struct request_queue *q = rq->q; in blk_mq_free_request()
1162 struct request_queue *q = hctx->queue; in blk_mq_flush_tag_batch()
1343 struct request_queue *q = rq->q; in blk_mq_start_request()
1508 struct request_queue *q = rq->q; in __blk_mq_requeue_request()
1523 struct request_queue *q = rq->q; in blk_mq_requeue_request()
1542 struct request_queue *q = in blk_mq_requeue_work()
1543 container_of(work, struct request_queue, requeue_work.work); in blk_mq_requeue_work()
1577 void blk_mq_kick_requeue_list(struct request_queue *q) in blk_mq_kick_requeue_list()
1583 void blk_mq_delay_kick_requeue_list(struct request_queue *q, in blk_mq_delay_kick_requeue_list()
1620 bool blk_mq_queue_inflight(struct request_queue *q) in blk_mq_queue_inflight()
1709 struct request_queue *q = in blk_mq_timeout_work()
1710 container_of(work, struct request_queue, timeout_work); in blk_mq_timeout_work()
2055 static void blk_mq_release_budgets(struct request_queue *q, in blk_mq_release_budgets()
2092 struct request_queue *q = hctx->queue; in blk_mq_dispatch_rq_list()
2367 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) in blk_mq_get_sq_hctx()
2389 void blk_mq_run_hw_queues(struct request_queue *q, bool async) in blk_mq_run_hw_queues()
2417 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) in blk_mq_delay_run_hw_queues()
2474 void blk_mq_stop_hw_queues(struct request_queue *q) in blk_mq_stop_hw_queues()
2492 void blk_mq_start_hw_queues(struct request_queue *q) in blk_mq_start_hw_queues()
2518 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) in blk_mq_start_stopped_hw_queues()
2597 struct request_queue *q = rq->q; in blk_mq_insert_request()
2683 struct request_queue *q = rq->q; in __blk_mq_issue_directly()
2824 static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs) in __blk_mq_flush_list()
2835 struct request_queue *this_q = rq->q; in blk_mq_extract_queue_requests()
2862 struct request_queue *q = rq_list_peek(rqs)->q; in blk_mq_dispatch_queue_requests()
3004 static bool blk_mq_attempt_bio_merge(struct request_queue *q, in blk_mq_attempt_bio_merge()
3016 static struct request *blk_mq_get_new_requests(struct request_queue *q, in blk_mq_get_new_requests()
3051 struct request_queue *q, blk_opf_t opf) in blk_mq_peek_cached_request()
3087 static bool bio_unaligned(const struct bio *bio, struct request_queue *q) in bio_unaligned()
3113 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in blk_mq_submit_bio()
3242 struct request_queue *q = rq->q; in blk_insert_cloned_request()
3861 static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q) in blk_mq_remove_hw_queues_cpuhp()
3883 static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q) in blk_mq_add_hw_queues_cpuhp()
3924 static void blk_mq_exit_hctx(struct request_queue *q, in blk_mq_exit_hctx()
3949 static void blk_mq_exit_hw_queues(struct request_queue *q, in blk_mq_exit_hw_queues()
3963 static int blk_mq_init_hctx(struct request_queue *q, in blk_mq_init_hctx()
3995 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, in blk_mq_alloc_hctx()
4061 static void blk_mq_init_cpu_queues(struct request_queue *q, in blk_mq_init_cpu_queues()
4145 static void blk_mq_map_swqueue(struct request_queue *q) in blk_mq_map_swqueue()
4263 static void queue_set_hctx_shared(struct request_queue *q, bool shared) in queue_set_hctx_shared()
4281 struct request_queue *q; in blk_mq_update_tag_set_shared()
4293 static void blk_mq_del_queue_tag_set(struct request_queue *q) in blk_mq_del_queue_tag_set()
4310 struct request_queue *q) in blk_mq_add_queue_tag_set()
4331 static int blk_mq_alloc_ctxs(struct request_queue *q) in blk_mq_alloc_ctxs()
4364 void blk_mq_release(struct request_queue *q) in blk_mq_release()
4387 struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, in blk_mq_alloc_queue()
4391 struct request_queue *q; in blk_mq_alloc_queue()
4423 void blk_mq_destroy_queue(struct request_queue *q) in blk_mq_destroy_queue()
4444 struct request_queue *q; in __blk_mq_alloc_disk()
4462 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, in blk_mq_alloc_disk_for_queue()
4486 struct blk_mq_tag_set *set, struct request_queue *q, in blk_mq_alloc_and_init_hctx()
4520 struct request_queue *q) in __blk_mq_realloc_hw_ctxs()
4560 struct request_queue *q) in blk_mq_realloc_hw_ctxs()
4572 struct request_queue *q) in blk_mq_init_allocated_queue()
4624 void blk_mq_exit_queue(struct request_queue *q) in blk_mq_exit_queue()
4920 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_mq_update_nr_requests()
4976 static void blk_mq_elv_switch_back(struct request_queue *q, in blk_mq_elv_switch_back()
4994 static int blk_mq_elv_switch_none(struct request_queue *q, in blk_mq_elv_switch_none()
5031 struct request_queue *q; in __blk_mq_update_nr_hw_queues()
5130 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, in blk_hctx_poll()
5157 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, in blk_mq_poll()
5168 struct request_queue *q = rq->q; in blk_rq_poll()
5189 void blk_mq_cancel_work_sync(struct request_queue *q) in blk_mq_cancel_work_sync()
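
The entries above cluster around a few blk-mq APIs, the largest group being the queue freeze/unfreeze and quiesce/unquiesce helpers. As a hedged illustration (not part of the cross-reference output), the sketch below shows how a driver-side helper might pause I/O on a request_queue around a state change, using only functions whose signatures appear in the listing. example_reconfigure_queue and the placeholder step are hypothetical; the _nomemsave/_nomemrestore freeze pair is used only because its full signature is visible here, since the plain blk_mq_freeze_queue() wrapper differs across kernel versions.

/*
 * Hedged sketch, not taken from the listing: a hypothetical driver helper
 * that pauses I/O on a request_queue around a configuration change.
 */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static void example_reconfigure_queue(struct request_queue *q)
{
        /*
         * Freeze: block new requests from entering the queue and wait for
         * outstanding ones to complete (see __blk_freeze_queue_start and
         * blk_mq_freeze_queue_wait in the listing).
         */
        blk_mq_freeze_queue_nomemsave(q);

        /*
         * Quiesce: additionally guarantee that no dispatch (->queue_rq) is
         * still running, so driver state can change without racing it.
         */
        blk_mq_quiesce_queue(q);

        /* ... driver-specific state or limits update would go here ... */

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue_nomemrestore(q);
}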
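
A second large group of entries covers request allocation and release (blk_mq_alloc_request, blk_mq_alloc_request_hctx, blk_mq_free_request and the cached-request paths). The following sketch is likewise hypothetical and only illustrates the allocate/free pairing; the REQ_OP_DRV_IN opcode and the absence of a payload are illustrative assumptions, not something the listing shows.

/*
 * Hedged sketch, not taken from the listing: allocate a passthrough-style
 * request from a queue and release it again.
 */
#include <linux/blk-mq.h>

static int example_alloc_and_free_rq(struct request_queue *q)
{
        struct request *rq;

        /* May sleep; returns an ERR_PTR() on failure, not NULL. */
        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* A real caller would set up the payload and execute the request. */

        blk_mq_free_request(rq);
        return 0;
}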