Lines Matching full:plug (whole-word matches for "plug"; the hits below are from the Linux block layer's blk-mq code, each annotated with its line number, the function it appears in, and whether "plug" is a function argument or a local variable)
580 struct blk_plug *plug, in blk_mq_rq_cache_fill() argument
588 .nr_tags = plug->nr_ios, in blk_mq_rq_cache_fill()
589 .cached_rqs = &plug->cached_rqs, in blk_mq_rq_cache_fill()
596 plug->nr_ios = 1; in blk_mq_rq_cache_fill()
608 struct blk_plug *plug = current->plug; in blk_mq_alloc_cached_request() local
611 if (!plug) in blk_mq_alloc_cached_request()
614 if (rq_list_empty(&plug->cached_rqs)) { in blk_mq_alloc_cached_request()
615 if (plug->nr_ios == 1) in blk_mq_alloc_cached_request()
617 rq = blk_mq_rq_cache_fill(q, plug, opf, flags); in blk_mq_alloc_cached_request()
621 rq = rq_list_peek(&plug->cached_rqs); in blk_mq_alloc_cached_request()
630 rq_list_pop(&plug->cached_rqs); in blk_mq_alloc_cached_request()
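The hits at 580-630 come from blk_mq_rq_cache_fill() and blk_mq_alloc_cached_request(): when a plug announces more than one upcoming I/O via plug->nr_ios and its cache is empty, a whole batch of requests is allocated into plug->cached_rqs in one go and one of them is returned; later allocations simply pop from that cache. Below is a minimal userspace model of that refill-then-pop shape. All type and helper names here (rq, rq_list, plug, cache_fill, alloc_cached) are simplified stand-ins invented for the sketch, not the kernel's definitions, and the real code additionally checks that a cached request matches the target queue, operation type and allocation flags before handing it out.

#include <stdbool.h>
#include <stdlib.h>

struct rq { struct rq *next; };
struct rq_list { struct rq *head, *tail; };
struct plug {
	struct rq_list cached_rqs;	/* pre-allocated but unused requests */
	unsigned int nr_ios;		/* I/Os the submitter expects to issue */
};

bool rq_list_empty(const struct rq_list *l)
{
	return l->head == NULL;
}

void rq_list_add_tail(struct rq_list *l, struct rq *rq)
{
	rq->next = NULL;
	if (l->tail)
		l->tail->next = rq;
	else
		l->head = rq;
	l->tail = rq;
}

struct rq *rq_list_pop(struct rq_list *l)
{
	struct rq *rq = l->head;

	if (rq) {
		l->head = rq->next;
		if (!l->head)
			l->tail = NULL;
		rq->next = NULL;
	}
	return rq;
}

/* Roughly blk_mq_rq_cache_fill(): allocate plug->nr_ios requests at once,
 * park them on the plug's cache, and reset nr_ios to 1 so the bulk fill
 * is attempted only once per plug.  One request is returned for
 * immediate use. */
struct rq *cache_fill(struct plug *plug)
{
	for (unsigned int i = 0; i < plug->nr_ios; i++) {
		struct rq *rq = calloc(1, sizeof(*rq));

		if (rq)
			rq_list_add_tail(&plug->cached_rqs, rq);
	}
	plug->nr_ios = 1;
	return rq_list_pop(&plug->cached_rqs);
}

/* Roughly blk_mq_alloc_cached_request(): serve from the cache when it has
 * entries; refill only if the caller announced multiple I/Os. */
struct rq *alloc_cached(struct plug *plug)
{
	if (!plug)
		return NULL;
	if (rq_list_empty(&plug->cached_rqs)) {
		if (plug->nr_ios == 1)
			return NULL;	/* a single I/O is not worth a bulk fill */
		return cache_fill(plug);
	}
	return rq_list_pop(&plug->cached_rqs);
}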
803 void blk_mq_free_plug_rqs(struct blk_plug *plug) in blk_mq_free_plug_rqs() argument
807 while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL) in blk_mq_free_plug_rqs()
1364 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1368 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) in blk_plug_max_rq_count() argument
1370 if (plug->multiple_queues) in blk_plug_max_rq_count()
1375 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) in blk_add_rq_to_plug() argument
1377 struct request *last = rq_list_peek(&plug->mq_list); in blk_add_rq_to_plug()
1379 if (!plug->rq_count) { in blk_add_rq_to_plug()
1381 } else if (plug->rq_count >= blk_plug_max_rq_count(plug) || in blk_add_rq_to_plug()
1384 blk_mq_flush_plug_list(plug, false); in blk_add_rq_to_plug()
1389 if (!plug->multiple_queues && last && last->q != rq->q) in blk_add_rq_to_plug()
1390 plug->multiple_queues = true; in blk_add_rq_to_plug()
1395 if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS)) in blk_add_rq_to_plug()
1396 plug->has_elevator = true; in blk_add_rq_to_plug()
1397 rq_list_add_tail(&plug->mq_list, rq); in blk_add_rq_to_plug()
1398 plug->rq_count++; in blk_add_rq_to_plug()
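blk_plug_max_rq_count() and blk_add_rq_to_plug() (hits 1364-1398) implement the batching policy: a plug normally holds up to BLK_MAX_REQUEST_COUNT requests (32 in current kernels), twice that once requests for more than one queue share it, and the list is flushed early when it fills up; the real code also flushes when the previously queued request is already large enough. Below is a sketch of that policy on the same simplified model as above; the request gains a queue pointer and a scheduler-tags flag purely for illustration, and flush_plug() stands in for blk_mq_flush_plug_list().

#include <stdbool.h>
#include <stddef.h>

struct queue;				/* stand-in for struct request_queue */
struct rq { struct rq *next; struct queue *q; bool sched_tags; };
struct rq_list { struct rq *head, *tail; };
struct plug {
	struct rq_list mq_list;		/* requests batched for dispatch */
	unsigned short rq_count;
	bool multiple_queues;
	bool has_elevator;
};

/* list helpers and flush_plug() as in the other sketches */
struct rq *rq_list_peek(struct rq_list *l);
void rq_list_add_tail(struct rq_list *l, struct rq *rq);
void flush_plug(struct plug *plug, bool from_schedule);

#define MAX_REQUEST_COUNT 32	/* plays the role of BLK_MAX_REQUEST_COUNT */

/* Mirrors blk_plug_max_rq_count(): allow a deeper batch once requests
 * for more than one queue share the plug. */
unsigned short plug_max_rq_count(const struct plug *plug)
{
	return plug->multiple_queues ? 2 * MAX_REQUEST_COUNT : MAX_REQUEST_COUNT;
}

/* Sketch of blk_add_rq_to_plug(): flush early when the batch is full,
 * note whether the plug now spans several queues or carries
 * scheduler-managed requests, then append the new request. */
void add_rq_to_plug(struct plug *plug, struct rq *rq)
{
	struct rq *last = rq_list_peek(&plug->mq_list);

	if (plug->rq_count && plug->rq_count >= plug_max_rq_count(plug)) {
		/* the kernel also flushes here when "last" is already big */
		flush_plug(plug, false);
		last = NULL;
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	if (!plug->has_elevator && rq->sched_tags)
		plug->has_elevator = true;

	rq_list_add_tail(&plug->mq_list, rq);
	plug->rq_count++;
}

The two flags maintained here, multiple_queues and has_elevator, are exactly what blk_mq_flush_plug_list() (hits 2873-2918 below) consults to decide whether the fast submission path is allowed.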
1422 if (current->plug && !at_head) { in blk_execute_rq_nowait()
1423 blk_add_rq_to_plug(current->plug, rq); in blk_execute_rq_nowait()
2573 * preemption doesn't flush plug list, so it's possible ctx->cpu is in blk_mq_insert_requests()
2781 static void blk_mq_plug_issue_direct(struct blk_plug *plug) in blk_mq_plug_issue_direct() argument
2788 while ((rq = rq_list_pop(&plug->mq_list))) { in blk_mq_plug_issue_direct()
2789 bool last = rq_list_empty(&plug->mq_list); in blk_mq_plug_issue_direct()
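blk_mq_plug_issue_direct() (hits 2781-2789) drains the plug by popping one request at a time and handing it straight to the driver; the bool computed from rq_list_empty() after each pop tells the driver whether this is the final request of the batch, so it can defer its doorbell/queue kick until the end instead of ringing it per request. A compact model of that loop, reusing the simplified list from the first sketch; drv_queue_rq is a made-up stand-in for the driver's ->queue_rq hook, and the error/resource handling of the real function is omitted.

#include <stdbool.h>

struct rq;
struct rq_list { struct rq *head, *tail; };

/* list helpers as in the first sketch; drv_queue_rq() is hypothetical */
struct rq *rq_list_pop(struct rq_list *l);
bool rq_list_empty(const struct rq_list *l);
void drv_queue_rq(struct rq *rq, bool last);

/* Model of the loop in blk_mq_plug_issue_direct(); "list" corresponds to
 * plug->mq_list. */
void plug_issue_direct(struct rq_list *list)
{
	struct rq *rq;

	while ((rq = rq_list_pop(list))) {
		/* true only for the final request of the batch: lets the
		 * driver defer its hardware notification until the end */
		bool last = rq_list_empty(list);

		drv_queue_rq(rq, last);
	}
}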
2821 struct blk_plug *plug) in __blk_mq_flush_plug_list() argument
2825 q->mq_ops->queue_rqs(&plug->mq_list); in __blk_mq_flush_plug_list()
2828 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) in blk_mq_dispatch_plug_list() argument
2838 struct request *rq = rq_list_pop(&plug->mq_list); in blk_mq_dispatch_plug_list()
2851 } while (!rq_list_empty(&plug->mq_list)); in blk_mq_dispatch_plug_list()
2853 plug->mq_list = requeue_list; in blk_mq_dispatch_plug_list()
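blk_mq_dispatch_plug_list() (hits 2828-2853) handles the general case: it walks plug->mq_list once, collects every request that maps to the same software/hardware queue pair as the first one, builds a requeue list out of the rest, puts that requeue list back on the plug (the assignment at 2853), and inserts the collected batch; its caller keeps invoking it until the plug list is empty. A sketch of that split on the simplified model, where the request's hctx field and insert_batch() are illustrative stand-ins for the kernel's per-request hardware-queue context and scheduler/hctx insertion.

#include <stdbool.h>
#include <stddef.h>

struct rq { struct rq *next; void *hctx; };	/* hctx: grouping key only */
struct rq_list { struct rq *head, *tail; };
struct plug { struct rq_list mq_list; };

/* list helpers as in the first sketch; insert_batch() is hypothetical */
struct rq *rq_list_pop(struct rq_list *l);
void rq_list_add_tail(struct rq_list *l, struct rq *rq);
bool rq_list_empty(const struct rq_list *l);
void insert_batch(void *hctx, struct rq_list *batch, bool from_sched);

/* Model of blk_mq_dispatch_plug_list(): gather every request that maps to
 * the same hardware queue as the first one, give the rest back to the
 * plug, and insert the gathered batch in one go.  Assumes a non-empty
 * plug list, as the kernel caller guarantees. */
void dispatch_plug_list(struct plug *plug, bool from_sched)
{
	struct rq_list batch = { NULL, NULL };
	struct rq_list requeue = { NULL, NULL };
	void *hctx = NULL;

	do {
		struct rq *rq = rq_list_pop(&plug->mq_list);

		if (!hctx)
			hctx = rq->hctx;
		if (rq->hctx == hctx)
			rq_list_add_tail(&batch, rq);
		else
			rq_list_add_tail(&requeue, rq);
	} while (!rq_list_empty(&plug->mq_list));

	/* leftovers go back on the plug; the caller loops until drained */
	plug->mq_list = requeue;

	insert_batch(hctx, &batch, from_sched);
}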
2873 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) in blk_mq_flush_plug_list() argument
2880 * plug->mq_list via a schedule() in the driver's queue_rq() callback. in blk_mq_flush_plug_list()
2885 if (plug->rq_count == 0) in blk_mq_flush_plug_list()
2887 depth = plug->rq_count; in blk_mq_flush_plug_list()
2888 plug->rq_count = 0; in blk_mq_flush_plug_list()
2890 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { in blk_mq_flush_plug_list()
2893 rq = rq_list_peek(&plug->mq_list); in blk_mq_flush_plug_list()
2899 * If we do, we can dispatch the whole plug list in one go. We in blk_mq_flush_plug_list()
2905 __blk_mq_flush_plug_list(q, plug)); in blk_mq_flush_plug_list()
2906 if (rq_list_empty(&plug->mq_list)) in blk_mq_flush_plug_list()
2911 blk_mq_plug_issue_direct(plug)); in blk_mq_flush_plug_list()
2912 if (rq_list_empty(&plug->mq_list)) in blk_mq_flush_plug_list()
2917 blk_mq_dispatch_plug_list(plug, from_schedule); in blk_mq_flush_plug_list()
2918 } while (!rq_list_empty(&plug->mq_list)); in blk_mq_flush_plug_list()
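blk_mq_flush_plug_list() (hits 2873-2918) is where the batch finally reaches the driver, and the matched lines expose its three-step strategy: when all requests target a single queue, no I/O scheduler is involved and the flush is not coming from inside schedule(), first offer the whole list to the driver's batch hook (->queue_rqs), then issue whatever was left behind directly, and only fall back to the per-hardware-queue dispatch loop if something still remains. A condensed model of that ordering, built from the sketches above; drv_queue_rqs stands in for the optional ->queue_rqs driver hook, and the dispatch_ops/tracing details of the real function are omitted.

#include <stdbool.h>
#include <stddef.h>

struct rq;
struct rq_list { struct rq *head, *tail; };
struct plug {
	struct rq_list mq_list;
	unsigned short rq_count;
	bool multiple_queues;
	bool has_elevator;
};

/* pieces from the sketches above; drv_queue_rqs is a hypothetical
 * pointer to a driver's optional batch-submission hook */
bool rq_list_empty(const struct rq_list *l);
void plug_issue_direct(struct rq_list *list);
void dispatch_plug_list(struct plug *plug, bool from_sched);
extern void (*drv_queue_rqs)(struct rq_list *list);

/* Condensed model of blk_mq_flush_plug_list(). */
void flush_plug(struct plug *plug, bool from_schedule)
{
	if (plug->rq_count == 0)
		return;
	plug->rq_count = 0;

	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
		/* Single queue, no elevator: hand the whole list to the
		 * driver in one call if it implements batch submission... */
		if (drv_queue_rqs) {
			drv_queue_rqs(&plug->mq_list);
			if (rq_list_empty(&plug->mq_list))
				return;
		}
		/* ...then issue anything the driver left behind directly. */
		plug_issue_direct(&plug->mq_list);
		if (rq_list_empty(&plug->mq_list))
			return;
	}

	/* General case: group by hardware queue and insert, until drained. */
	do {
		dispatch_plug_list(plug, from_schedule);
	} while (!rq_list_empty(&plug->mq_list));
}

The comment fragment at 2880 hints at why from_schedule gates the fast path: the driver's queue_rq() callback may itself schedule(), so a flush that is already running from inside schedule() avoids the direct-issue routes.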
2967 struct blk_plug *plug, in blk_mq_get_new_requests() argument
2980 if (plug) { in blk_mq_get_new_requests()
2981 data.nr_tags = plug->nr_ios; in blk_mq_get_new_requests()
2982 plug->nr_ios = 1; in blk_mq_get_new_requests()
2983 data.cached_rqs = &plug->cached_rqs; in blk_mq_get_new_requests()
2995 static struct request *blk_mq_peek_cached_request(struct blk_plug *plug, in blk_mq_peek_cached_request() argument
3001 if (!plug) in blk_mq_peek_cached_request()
3003 rq = rq_list_peek(&plug->cached_rqs); in blk_mq_peek_cached_request()
3014 static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, in blk_mq_use_cached_rq() argument
3017 if (rq_list_pop(&plug->cached_rqs) != rq) in blk_mq_use_cached_rq()
3022 * plug and hence killed the cached_rq list as well. Pop this entry in blk_mq_use_cached_rq()
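blk_mq_peek_cached_request() and blk_mq_use_cached_rq() (hits 2995-3022) split "is there a usable cached request?" from "actually take it". The peek only looks at the head of plug->cached_rqs and, in the real code, verifies that the queue and operation are compatible; the later pop is the commit point, and it warns if it does not get back the very request that was peeked, because (as the comment fragment at 3022 indicates) anything that blocks in between can flush the plug and kill the cached_rq list. A small model of that peek-then-commit pattern; matches() and warn_once() are stand-ins for the kernel's compatibility checks and WARN_ON_ONCE().

#include <stdbool.h>
#include <stddef.h>

struct queue;				/* stand-in for struct request_queue */
struct rq { struct rq *next; struct queue *q; unsigned int opf; };
struct rq_list { struct rq *head, *tail; };
struct plug { struct rq_list cached_rqs; };

/* list helpers as in the first sketch; matches()/warn_once() are
 * hypothetical stand-ins */
struct rq *rq_list_peek(struct rq_list *l);
struct rq *rq_list_pop(struct rq_list *l);
bool matches(const struct rq *rq, const struct queue *q, unsigned int opf);
void warn_once(void);

/* Roughly blk_mq_peek_cached_request(): look, but do not take. */
struct rq *peek_cached_request(struct plug *plug, struct queue *q,
			       unsigned int opf)
{
	struct rq *rq;

	if (!plug)
		return NULL;
	rq = rq_list_peek(&plug->cached_rqs);
	if (!rq || !matches(rq, q, opf))
		return NULL;
	return rq;		/* still on the list: nothing is committed yet */
}

/* Roughly blk_mq_use_cached_rq(): commit by popping the cache.  Getting a
 * different request back means the cache changed underneath us, e.g. the
 * plug was flushed by something that blocked after the peek. */
void use_cached_rq(struct rq *rq, struct plug *plug)
{
	if (rq_list_pop(&plug->cached_rqs) != rq)
		warn_once();
}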
3050 * * We want to place request at plug queue for possible future merging
3059 struct blk_plug *plug = current->plug; in blk_mq_submit_bio() local
3067 * If the plug has a cached request for this queue, try to use it. in blk_mq_submit_bio()
3069 rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf); in blk_mq_submit_bio()
3072 * A BIO that was released from a zone write plug has already been in blk_mq_submit_bio()
3126 blk_mq_use_cached_rq(rq, plug, bio); in blk_mq_submit_bio()
3128 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); in blk_mq_submit_bio()
3156 if (plug) { in blk_mq_submit_bio()
3157 blk_add_rq_to_plug(plug, rq); in blk_mq_submit_bio()
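Finally, blk_mq_submit_bio() (hits 3050-3157) ties the pieces together: it peeks the plug cache for a usable request first, prepares the bio, then either commits the cached request or allocates a fresh one (blk_mq_get_new_requests() at 2967-2983 seeds data.nr_tags from plug->nr_ios so the allocator can grab extra tags and cache them), and ends by either appending the request to the plug for later flushing or inserting it and running the hardware queue immediately when no plug is active. A high-level model of that tail, using the earlier sketches as building blocks; struct bio here is only a placeholder, and get_new_request()/insert_and_run() are illustrative stand-ins for the allocation and direct-insert paths.

#include <stddef.h>

struct queue;
struct bio;				/* placeholder only */
struct rq;
struct plug;

/* building blocks from the sketches above plus hypothetical stand-ins */
struct rq *peek_cached_request(struct plug *plug, struct queue *q,
			       unsigned int opf);
void use_cached_rq(struct rq *rq, struct plug *plug);
void add_rq_to_plug(struct plug *plug, struct rq *rq);
struct rq *get_new_request(struct queue *q, struct plug *plug, struct bio *bio);
void insert_and_run(struct rq *rq);
unsigned int bio_opf(const struct bio *bio);

/* High-level model of the tail of blk_mq_submit_bio(). */
void submit_bio_model(struct queue *q, struct bio *bio, struct plug *plug)
{
	/* 1) Prefer a request that was pre-allocated into the plug cache. */
	struct rq *rq = peek_cached_request(plug, q, bio_opf(bio));

	/* ... bio splitting, merge attempts and zone-write handling sit
	 * here in the real function ... */

	if (rq) {
		use_cached_rq(rq, plug);	/* commit the cached entry */
	} else {
		/* Allocate fresh; with an active plug, plug->nr_ios lets the
		 * allocator take several tags at once and cache the spares. */
		rq = get_new_request(q, plug, bio);
		if (!rq)
			return;
	}

	/* 2) Batch through the plug when one is active; otherwise insert the
	 *    request and kick the hardware queue right away. */
	if (plug)
		add_rq_to_plug(plug, rq);
	else
		insert_and_run(rq);
}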