Lines Matching full:plug
619 /* If plug is not used, add new plug here to cache nsecs time. */
620 struct blk_plug plug;
625 blk_start_plug(&plug);
642 blk_finish_plug(&plug);
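
The fragment above (lines 619-642) is the per-submission on-stack plug that blk-core sets up around a single bio; ordinary submitters use the same pattern to batch several bios. A minimal sketch follows, assuming a caller that already holds an array of prepared bios; submit_bio_batch() and its parameters are hypothetical, only the blk_plug calls are the real API.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: batch nr prepared bios under one on-stack plug. */
static void submit_bio_batch(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);		/* current->plug now points at the stack plug */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* requests queue up under the plug */
	blk_finish_plug(&plug);		/* flush the batch and clear current->plug */
}
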
940 blk_flush_plug(current->plug, false);
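
Line 940 shows the other side of the contract: a path that is about to wait for (or poll) I/O first pushes out anything still sitting in its own plug, otherwise the request it is waiting on may never reach the driver. A hedged sketch of that pattern, with wait_for_my_io() as a hypothetical caller:

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/sched.h>

/* Hypothetical: flush our own plug before sleeping on an I/O completion. */
static void wait_for_my_io(struct completion *done)
{
	if (current->plug)
		blk_flush_plug(current->plug, false);
	wait_for_completion(done);
}
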
1112 void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1117 * If this is a nested plug, don't actually assign it.
1119 if (tsk->plug)
1122 plug->cur_ktime = 0;
1123 rq_list_init(&plug->mq_list);
1124 rq_list_init(&plug->cached_rqs);
1125 plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1126 plug->rq_count = 0;
1127 plug->multiple_queues = false;
1128 plug->has_elevator = false;
1129 INIT_LIST_HEAD(&plug->cb_list);
1135 tsk->plug = plug;
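
blk_start_plug_nr_ios() lets a submitter that already knows how many I/Os are coming size the plug's cached-request pool up front (capped at BLK_MAX_REQUEST_COUNT, line 1125). A sketch under that assumption; submit_known_batch() is hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical: the caller knows it will submit exactly nr bios. */
static void submit_known_batch(struct bio **bios, unsigned short nr)
{
	struct blk_plug plug;
	unsigned short i;

	/* nr_ios is clamped to BLK_MAX_REQUEST_COUNT inside the call */
	blk_start_plug_nr_ios(&plug, nr);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}

In the kernel itself io_uring is the main user of this entry point; blk_start_plug() is simply the nr_ios == 1 case, as line 1163 shows.
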
1140 * @plug: The &struct blk_plug that needs to be initialized
1158 * plug. By flushing the pending I/O when the process goes to sleep, we avoid this kind of deadlock.
1161 void blk_start_plug(struct blk_plug *plug)
1163 blk_start_plug_nr_ios(plug, 1);
1167 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1171 while (!list_empty(&plug->cb_list)) {
1172 list_splice_init(&plug->cb_list, &callbacks);
1187 struct blk_plug *plug = current->plug;
1190 if (!plug)
1193 list_for_each_entry(cb, &plug->cb_list, list)
1203 list_add(&cb->list, &plug->cb_list);
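
The blk_check_plugged() fragment (lines 1187-1203) is the hook drivers use to piggyback their own deferred work on whatever plug the submitting task currently holds: it returns an existing callback if one is already registered for the same (callback, data) pair, or allocates a new one and links it onto plug->cb_list. A sketch of the usual driver-side pattern, loosely modeled on how md batches bios; my_plug_cb, my_unplug() and my_queue_under_plug() are hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_plug_cb {
	struct blk_plug_cb cb;		/* handle allocated by blk_check_plugged() */
	struct bio_list pending;	/* valid when zeroed, kzalloc() takes care of it */
};

/* Runs from flush_plug_callbacks() when the task's plug is flushed. */
static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio;

	/* real drivers typically punt to a worker when from_schedule is true */
	while ((bio = bio_list_pop(&mcb->pending)))
		submit_bio_noacct(bio);
	kfree(cb);	/* the callback owns the allocation once it has run */
}

/* Returns true if the bio was parked under the current plug. */
static bool my_queue_under_plug(void *owner, struct bio *bio)
{
	struct blk_plug_cb *cb;
	struct my_plug_cb *mcb;

	cb = blk_check_plugged(my_unplug, owner, sizeof(struct my_plug_cb));
	if (!cb)
		return false;	/* no plug active: caller submits directly */

	mcb = container_of(cb, struct my_plug_cb, cb);
	bio_list_add(&mcb->pending, bio);
	return true;
}
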
1209 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1211 if (!list_empty(&plug->cb_list))
1212 flush_plug_callbacks(plug, from_schedule);
1213 blk_mq_flush_plug_list(plug, from_schedule);
1220 if (unlikely(!rq_list_empty(&plug->cached_rqs)))
1221 blk_mq_free_plug_rqs(plug);
1223 plug->cur_ktime = 0;
1229 * @plug: The &struct blk_plug passed to blk_start_plug()
1237 void blk_finish_plug(struct blk_plug *plug)
1239 if (plug == current->plug) {
1240 __blk_flush_plug(plug, false);
1241 current->plug = NULL;
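
Taken together, the check at line 1119 (a nested plug is never assigned) and the check at line 1239 (only the plug actually installed in current->plug is flushed) make nesting harmless: an inner start/finish pair degenerates to a no-op and only the outermost blk_finish_plug() flushes. A sketch of that behaviour, with hypothetical outer()/inner() helpers:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical inner helper: safe whether or not the caller holds a plug. */
static void inner(struct bio *bio)
{
	struct blk_plug plug;

	blk_start_plug(&plug);	/* nested call: tsk->plug already set, no-op (line 1119) */
	submit_bio(bio);
	blk_finish_plug(&plug);	/* &plug != current->plug, nothing flushed (line 1239) */
}

/* Hypothetical outer caller: owns the plug that actually batches the I/O. */
static void outer(struct bio *a, struct bio *b)
{
	struct blk_plug plug;

	blk_start_plug(&plug);	/* installs current->plug */
	inner(a);
	inner(b);
	blk_finish_plug(&plug);	/* flushes everything and clears current->plug */
}
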