xref: /linux/block/blk-mq.c (revision 393de512e719a5fbd6712fc392a571ab287eb8ab)
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28 
29 #include <trace/events/block.h>
30 
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-debugfs.h"
35 #include "blk-mq-tag.h"
36 #include "blk-stat.h"
37 #include "blk-wbt.h"
38 #include "blk-mq-sched.h"
39 
40 static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
41 static void blk_mq_poll_stats_start(struct request_queue *q);
42 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
43 
44 static int blk_mq_poll_stats_bkt(const struct request *rq)
45 {
46 	int ddir, bytes, bucket;
47 
48 	ddir = rq_data_dir(rq);
49 	bytes = blk_rq_bytes(rq);
50 
51 	bucket = ddir + 2*(ilog2(bytes) - 9);
52 
53 	if (bucket < 0)
54 		return -1;
55 	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
56 		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
57 
58 	return bucket;
59 }
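/*
 * Worked example (illustrative only, not used by the code): for a
 * 4096-byte read, ilog2(4096) == 12, so bucket == READ + 2 * (12 - 9),
 * i.e. bucket 6; the corresponding write lands in bucket 7.  Anything
 * smaller than 512 bytes yields a negative bucket and is not accounted.
 */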
60 
61 /*
62  * Check if any of the ctx's have pending work in this hardware queue
63  */
64 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
65 {
66 	return !list_empty_careful(&hctx->dispatch) ||
67 		sbitmap_any_bit_set(&hctx->ctx_map) ||
68 			blk_mq_sched_has_work(hctx);
69 }
70 
71 /*
72  * Mark this ctx as having pending work in this hardware queue
73  */
74 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
75 				     struct blk_mq_ctx *ctx)
76 {
77 	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
78 		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
79 }
80 
81 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
82 				      struct blk_mq_ctx *ctx)
83 {
84 	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
85 }
86 
87 struct mq_inflight {
88 	struct hd_struct *part;
89 	unsigned int *inflight;
90 };
91 
92 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
93 				  struct request *rq, void *priv,
94 				  bool reserved)
95 {
96 	struct mq_inflight *mi = priv;
97 
98 	if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
99 		/*
100 		 * index[0] counts the specific partition that was asked
101 		 * for. index[1] counts the ones that are active on the
102 		 * whole device, so increment that if mi->part is indeed
103 		 * a partition, and not a whole device.
104 		 */
105 		if (rq->part == mi->part)
106 			mi->inflight[0]++;
107 		if (mi->part->partno)
108 			mi->inflight[1]++;
109 	}
110 }
111 
112 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
113 		      unsigned int inflight[2])
114 {
115 	struct mq_inflight mi = { .part = part, .inflight = inflight, };
116 
117 	inflight[0] = inflight[1] = 0;
118 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
119 }
120 
121 void blk_freeze_queue_start(struct request_queue *q)
122 {
123 	int freeze_depth;
124 
125 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
126 	if (freeze_depth == 1) {
127 		percpu_ref_kill(&q->q_usage_counter);
128 		if (q->mq_ops)
129 			blk_mq_run_hw_queues(q, false);
130 	}
131 }
132 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
133 
134 void blk_mq_freeze_queue_wait(struct request_queue *q)
135 {
136 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
137 }
138 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
139 
140 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
141 				     unsigned long timeout)
142 {
143 	return wait_event_timeout(q->mq_freeze_wq,
144 					percpu_ref_is_zero(&q->q_usage_counter),
145 					timeout);
146 }
147 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
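/*
 * Illustrative sketch (hypothetical driver-side names): a multi-queue
 * driver that owns several request queues can start the freeze on all
 * of them first and only then wait, so the drains proceed in parallel:
 *
 *	list_for_each_entry(ns, &ctrl->namespaces, list)
 *		blk_freeze_queue_start(ns->queue);
 *	list_for_each_entry(ns, &ctrl->namespaces, list)
 *		blk_mq_freeze_queue_wait(ns->queue);
 */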
148 
149 /*
150  * Guarantee no request is in use, so we can change any data structure of
151  * the queue afterward.
152  */
153 void blk_freeze_queue(struct request_queue *q)
154 {
155 	/*
156 	 * In the !blk_mq case we are only calling this to kill the
157 	 * q_usage_counter, otherwise this increases the freeze depth
158 	 * and waits for it to return to zero.  For this reason there is
159 	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
160 	 * exported to drivers, since the only user of unfreeze is blk-mq.
161 	 */
162 	blk_freeze_queue_start(q);
163 	if (!q->mq_ops)
164 		blk_drain_queue(q);
165 	blk_mq_freeze_queue_wait(q);
166 }
167 
168 void blk_mq_freeze_queue(struct request_queue *q)
169 {
170 	/*
171 	 * ...just an alias to keep freeze and unfreeze actions balanced
172 	 * in the blk_mq_* namespace
173 	 */
174 	blk_freeze_queue(q);
175 }
176 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
177 
178 void blk_mq_unfreeze_queue(struct request_queue *q)
179 {
180 	int freeze_depth;
181 
182 	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
183 	WARN_ON_ONCE(freeze_depth < 0);
184 	if (!freeze_depth) {
185 		percpu_ref_reinit(&q->q_usage_counter);
186 		wake_up_all(&q->mq_freeze_wq);
187 	}
188 }
189 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
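/*
 * Illustrative sketch (hypothetical driver names): a driver that must
 * update queue state with no requests in flight brackets the update
 * with a freeze/unfreeze pair.  No request is in flight between the
 * two calls:
 *
 *	blk_mq_freeze_queue(my_dev->queue);
 *	my_update_limits(my_dev);
 *	blk_mq_unfreeze_queue(my_dev->queue);
 */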
190 
191 /*
192  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
193  * mpt3sas driver such that this function can be removed.
194  */
195 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
196 {
197 	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
198 }
199 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
200 
201 /**
202  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
203  * @q: request queue.
204  *
205  * Note: this function does not prevent the struct request end_io()
206  * callback from being invoked. Once this function returns, it is
207  * guaranteed that no dispatch can happen until the queue is unquiesced
208  * via blk_mq_unquiesce_queue().
209  */
210 void blk_mq_quiesce_queue(struct request_queue *q)
211 {
212 	struct blk_mq_hw_ctx *hctx;
213 	unsigned int i;
214 	bool rcu = false;
215 
216 	blk_mq_quiesce_queue_nowait(q);
217 
218 	queue_for_each_hw_ctx(q, hctx, i) {
219 		if (hctx->flags & BLK_MQ_F_BLOCKING)
220 			synchronize_srcu(hctx->srcu);
221 		else
222 			rcu = true;
223 	}
224 	if (rcu)
225 		synchronize_rcu();
226 }
227 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
228 
229 /*
230  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
231  * @q: request queue.
232  *
233  * This function restores the queue to the state it was in before
234  * blk_mq_quiesce_queue() was called.
235  */
236 void blk_mq_unquiesce_queue(struct request_queue *q)
237 {
238 	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
239 
240 	/* dispatch requests which are inserted during quiescing */
241 	blk_mq_run_hw_queues(q, true);
242 }
243 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
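/*
 * Illustrative sketch (hypothetical driver names): quiescing is the
 * right tool when a driver must guarantee that no new ->queue_rq()
 * calls happen, e.g. around a controller reset, while already
 * dispatched requests are still allowed to complete:
 *
 *	blk_mq_quiesce_queue(my_dev->queue);
 *	my_reset_controller(my_dev);
 *	blk_mq_unquiesce_queue(my_dev->queue);
 */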
244 
245 void blk_mq_wake_waiters(struct request_queue *q)
246 {
247 	struct blk_mq_hw_ctx *hctx;
248 	unsigned int i;
249 
250 	queue_for_each_hw_ctx(q, hctx, i)
251 		if (blk_mq_hw_queue_mapped(hctx))
252 			blk_mq_tag_wakeup_all(hctx->tags, true);
253 }
254 
255 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
256 {
257 	return blk_mq_has_free_tags(hctx->tags);
258 }
259 EXPORT_SYMBOL(blk_mq_can_queue);
260 
261 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
262 		unsigned int tag, unsigned int op)
263 {
264 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
265 	struct request *rq = tags->static_rqs[tag];
266 	req_flags_t rq_flags = 0;
267 
268 	if (data->flags & BLK_MQ_REQ_INTERNAL) {
269 		rq->tag = -1;
270 		rq->internal_tag = tag;
271 	} else {
272 		if (blk_mq_tag_busy(data->hctx)) {
273 			rq_flags = RQF_MQ_INFLIGHT;
274 			atomic_inc(&data->hctx->nr_active);
275 		}
276 		rq->tag = tag;
277 		rq->internal_tag = -1;
278 		data->hctx->tags->rqs[rq->tag] = rq;
279 	}
280 
281 	/* csd/requeue_work/fifo_time is initialized before use */
282 	rq->q = data->q;
283 	rq->mq_ctx = data->ctx;
284 	rq->rq_flags = rq_flags;
285 	rq->cpu = -1;
286 	rq->cmd_flags = op;
287 	if (data->flags & BLK_MQ_REQ_PREEMPT)
288 		rq->rq_flags |= RQF_PREEMPT;
289 	if (blk_queue_io_stat(data->q))
290 		rq->rq_flags |= RQF_IO_STAT;
291 	INIT_LIST_HEAD(&rq->queuelist);
292 	INIT_HLIST_NODE(&rq->hash);
293 	RB_CLEAR_NODE(&rq->rb_node);
294 	rq->rq_disk = NULL;
295 	rq->part = NULL;
296 	rq->start_time = jiffies;
297 	rq->nr_phys_segments = 0;
298 #if defined(CONFIG_BLK_DEV_INTEGRITY)
299 	rq->nr_integrity_segments = 0;
300 #endif
301 	rq->special = NULL;
302 	/* tag was already set */
303 	rq->extra_len = 0;
304 	rq->__deadline = 0;
305 
306 	INIT_LIST_HEAD(&rq->timeout_list);
307 	rq->timeout = 0;
308 
309 	rq->end_io = NULL;
310 	rq->end_io_data = NULL;
311 	rq->next_rq = NULL;
312 
313 #ifdef CONFIG_BLK_CGROUP
314 	rq->rl = NULL;
315 	set_start_time_ns(rq);
316 	rq->io_start_time_ns = 0;
317 #endif
318 
319 	data->ctx->rq_dispatched[op_is_sync(op)]++;
320 	return rq;
321 }
322 
323 static struct request *blk_mq_get_request(struct request_queue *q,
324 		struct bio *bio, unsigned int op,
325 		struct blk_mq_alloc_data *data)
326 {
327 	struct elevator_queue *e = q->elevator;
328 	struct request *rq;
329 	unsigned int tag;
330 	bool put_ctx_on_error = false;
331 
332 	blk_queue_enter_live(q);
333 	data->q = q;
334 	if (likely(!data->ctx)) {
335 		data->ctx = blk_mq_get_ctx(q);
336 		put_ctx_on_error = true;
337 	}
338 	if (likely(!data->hctx))
339 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
340 	if (op & REQ_NOWAIT)
341 		data->flags |= BLK_MQ_REQ_NOWAIT;
342 
343 	if (e) {
344 		data->flags |= BLK_MQ_REQ_INTERNAL;
345 
346 		/*
347 		 * Flush requests are special and go directly to the
348 		 * dispatch list.
349 		 */
350 		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
351 			e->type->ops.mq.limit_depth(op, data);
352 	}
353 
354 	tag = blk_mq_get_tag(data);
355 	if (tag == BLK_MQ_TAG_FAIL) {
356 		if (put_ctx_on_error) {
357 			blk_mq_put_ctx(data->ctx);
358 			data->ctx = NULL;
359 		}
360 		blk_queue_exit(q);
361 		return NULL;
362 	}
363 
364 	rq = blk_mq_rq_ctx_init(data, tag, op);
365 	if (!op_is_flush(op)) {
366 		rq->elv.icq = NULL;
367 		if (e && e->type->ops.mq.prepare_request) {
368 			if (e->type->icq_cache && rq_ioc(bio))
369 				blk_mq_sched_assign_ioc(rq, bio);
370 
371 			e->type->ops.mq.prepare_request(rq, bio);
372 			rq->rq_flags |= RQF_ELVPRIV;
373 		}
374 	}
375 	data->hctx->queued++;
376 	return rq;
377 }
378 
379 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
380 		blk_mq_req_flags_t flags)
381 {
382 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
383 	struct request *rq;
384 	int ret;
385 
386 	ret = blk_queue_enter(q, flags);
387 	if (ret)
388 		return ERR_PTR(ret);
389 
390 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
391 	blk_queue_exit(q);
392 
393 	if (!rq)
394 		return ERR_PTR(-EWOULDBLOCK);
395 
396 	blk_mq_put_ctx(alloc_data.ctx);
397 
398 	rq->__data_len = 0;
399 	rq->__sector = (sector_t) -1;
400 	rq->bio = rq->biotail = NULL;
401 	return rq;
402 }
403 EXPORT_SYMBOL(blk_mq_alloc_request);
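/*
 * Illustrative sketch (hypothetical helpers, error handling trimmed):
 * a typical passthrough-style caller allocates a request, fills in the
 * driver-specific payload behind blk_mq_rq_to_pdu(), executes it
 * synchronously and frees it:
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	my_fill_command(blk_mq_rq_to_pdu(rq));
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_mq_free_request(rq);
 */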
404 
405 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
406 	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
407 {
408 	struct blk_mq_alloc_data alloc_data = { .flags = flags };
409 	struct request *rq;
410 	unsigned int cpu;
411 	int ret;
412 
413 	/*
414 	 * If the tag allocator sleeps we could get an allocation for a
415 	 * different hardware context.  No need to complicate the low level
416 	 * allocator for the rare use case of a command tied to a specific
417 	 * queue.
418 	 */
419 	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
420 		return ERR_PTR(-EINVAL);
421 
422 	if (hctx_idx >= q->nr_hw_queues)
423 		return ERR_PTR(-EIO);
424 
425 	ret = blk_queue_enter(q, flags);
426 	if (ret)
427 		return ERR_PTR(ret);
428 
429 	/*
430 	 * Check if the hardware context is actually mapped to anything.
431 	 * If not, tell the caller that it should skip this queue.
432 	 */
433 	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
434 	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
435 		blk_queue_exit(q);
436 		return ERR_PTR(-EXDEV);
437 	}
438 	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
439 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
440 
441 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
442 	blk_queue_exit(q);
443 
444 	if (!rq)
445 		return ERR_PTR(-EWOULDBLOCK);
446 
447 	return rq;
448 }
449 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
450 
451 void blk_mq_free_request(struct request *rq)
452 {
453 	struct request_queue *q = rq->q;
454 	struct elevator_queue *e = q->elevator;
455 	struct blk_mq_ctx *ctx = rq->mq_ctx;
456 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
457 	const int sched_tag = rq->internal_tag;
458 
459 	if (rq->rq_flags & RQF_ELVPRIV) {
460 		if (e && e->type->ops.mq.finish_request)
461 			e->type->ops.mq.finish_request(rq);
462 		if (rq->elv.icq) {
463 			put_io_context(rq->elv.icq->ioc);
464 			rq->elv.icq = NULL;
465 		}
466 	}
467 
468 	ctx->rq_completed[rq_is_sync(rq)]++;
469 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
470 		atomic_dec(&hctx->nr_active);
471 
472 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
473 		laptop_io_completion(q->backing_dev_info);
474 
475 	wbt_done(q->rq_wb, &rq->issue_stat);
476 
477 	if (blk_rq_rl(rq))
478 		blk_put_rl(blk_rq_rl(rq));
479 
480 	blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
481 	if (rq->tag != -1)
482 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
483 	if (sched_tag != -1)
484 		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
485 	blk_mq_sched_restart(hctx);
486 	blk_queue_exit(q);
487 }
488 EXPORT_SYMBOL_GPL(blk_mq_free_request);
489 
490 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
491 {
492 	blk_account_io_done(rq);
493 
494 	if (rq->end_io) {
495 		wbt_done(rq->q->rq_wb, &rq->issue_stat);
496 		rq->end_io(rq, error);
497 	} else {
498 		if (unlikely(blk_bidi_rq(rq)))
499 			blk_mq_free_request(rq->next_rq);
500 		blk_mq_free_request(rq);
501 	}
502 }
503 EXPORT_SYMBOL(__blk_mq_end_request);
504 
505 void blk_mq_end_request(struct request *rq, blk_status_t error)
506 {
507 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
508 		BUG();
509 	__blk_mq_end_request(rq, error);
510 }
511 EXPORT_SYMBOL(blk_mq_end_request);
512 
513 static void __blk_mq_complete_request_remote(void *data)
514 {
515 	struct request *rq = data;
516 
517 	rq->q->softirq_done_fn(rq);
518 }
519 
520 static void __blk_mq_complete_request(struct request *rq)
521 {
522 	struct blk_mq_ctx *ctx = rq->mq_ctx;
523 	bool shared = false;
524 	int cpu;
525 
526 	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
527 	blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);
528 
529 	if (rq->internal_tag != -1)
530 		blk_mq_sched_completed_request(rq);
531 	if (rq->rq_flags & RQF_STATS) {
532 		blk_mq_poll_stats_start(rq->q);
533 		blk_stat_add(rq);
534 	}
535 
536 	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
537 		rq->q->softirq_done_fn(rq);
538 		return;
539 	}
540 
541 	cpu = get_cpu();
542 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
543 		shared = cpus_share_cache(cpu, ctx->cpu);
544 
545 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
546 		rq->csd.func = __blk_mq_complete_request_remote;
547 		rq->csd.info = rq;
548 		rq->csd.flags = 0;
549 		smp_call_function_single_async(ctx->cpu, &rq->csd);
550 	} else {
551 		rq->q->softirq_done_fn(rq);
552 	}
553 	put_cpu();
554 }
555 
556 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
557 	__releases(hctx->srcu)
558 {
559 	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
560 		rcu_read_unlock();
561 	else
562 		srcu_read_unlock(hctx->srcu, srcu_idx);
563 }
564 
565 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
566 	__acquires(hctx->srcu)
567 {
568 	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
569 		/* shut up gcc false positive */
570 		*srcu_idx = 0;
571 		rcu_read_lock();
572 	} else
573 		*srcu_idx = srcu_read_lock(hctx->srcu);
574 }
575 
576 static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
577 {
578 	unsigned long flags;
579 
580 	/*
581 	 * blk_mq_rq_aborted_gstate() is used from the completion path and
582 	 * can thus be called from irq context.  A u64_stats fetch that
583 	 * lands in the middle of an update on the same CPU leads to a
584 	 * lockup, so disable irqs while updating.
585 	 */
586 	local_irq_save(flags);
587 	u64_stats_update_begin(&rq->aborted_gstate_sync);
588 	rq->aborted_gstate = gstate;
589 	u64_stats_update_end(&rq->aborted_gstate_sync);
590 	local_irq_restore(flags);
591 }
592 
593 static u64 blk_mq_rq_aborted_gstate(struct request *rq)
594 {
595 	unsigned int start;
596 	u64 aborted_gstate;
597 
598 	do {
599 		start = u64_stats_fetch_begin(&rq->aborted_gstate_sync);
600 		aborted_gstate = rq->aborted_gstate;
601 	} while (u64_stats_fetch_retry(&rq->aborted_gstate_sync, start));
602 
603 	return aborted_gstate;
604 }
605 
606 /**
607  * blk_mq_complete_request - end I/O on a request
608  * @rq:		the request being processed
609  *
610  * Description:
611  *	Ends all I/O on a request. It does not handle partial completions.
612  *	The actual completion happens out-of-order, through an IPI handler.
613  **/
614 void blk_mq_complete_request(struct request *rq)
615 {
616 	struct request_queue *q = rq->q;
617 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
618 	int srcu_idx;
619 
620 	if (unlikely(blk_should_fake_timeout(q)))
621 		return;
622 
623 	/*
624 	 * If @rq->aborted_gstate equals the current instance, timeout is
625 	 * claiming @rq and we lost.  This is synchronized through
626 	 * hctx_lock().  See blk_mq_timeout_work() for details.
627 	 *
628 	 * Completion path never blocks and we can directly use RCU here
629 	 * instead of hctx_lock() which can be either RCU or SRCU.
630 	 * However, that would complicate paths which want to synchronize
631 	 * against us.  Let's stay in sync with the issue path so that
632 	 * hctx_lock() covers both issue and completion paths.
633 	 */
634 	hctx_lock(hctx, &srcu_idx);
635 	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
636 		__blk_mq_complete_request(rq);
637 	hctx_unlock(hctx, srcu_idx);
638 }
639 EXPORT_SYMBOL(blk_mq_complete_request);
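/*
 * Illustrative sketch of the completion flow from a driver's point of
 * view (hypothetical names): the hard-irq handler only marks the
 * request complete, and the real completion work runs from the
 * driver's ->complete() callback, possibly on the submitting CPU via
 * the IPI path above:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct request *rq = my_fetch_completed_rq(data);
 *
 *		blk_mq_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void my_complete_rq(struct request *rq)
 *	{
 *		blk_mq_end_request(rq, my_rq_status(rq));
 *	}
 */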
640 
641 int blk_mq_request_started(struct request *rq)
642 {
643 	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
644 }
645 EXPORT_SYMBOL_GPL(blk_mq_request_started);
646 
647 void blk_mq_start_request(struct request *rq)
648 {
649 	struct request_queue *q = rq->q;
650 
651 	blk_mq_sched_started_request(rq);
652 
653 	trace_block_rq_issue(q, rq);
654 
655 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
656 		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
657 		rq->rq_flags |= RQF_STATS;
658 		wbt_issue(q->rq_wb, &rq->issue_stat);
659 	}
660 
661 	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
662 
663 	/*
664 	 * Mark @rq in-flight which also advances the generation number,
665 	 * and register for timeout.  Protect with a seqcount to allow the
666 	 * timeout path to read both @rq->gstate and @rq->deadline
667 	 * coherently.
668 	 *
669 	 * This is the only place where a request is marked in-flight.  If
670 	 * the timeout path reads an in-flight @rq->gstate, the
671 	 * @rq->deadline it reads together under @rq->gstate_seq is
672 	 * guaranteed to be the matching one.
673 	 */
674 	preempt_disable();
675 	write_seqcount_begin(&rq->gstate_seq);
676 
677 	blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
678 	blk_add_timer(rq);
679 
680 	write_seqcount_end(&rq->gstate_seq);
681 	preempt_enable();
682 
683 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
684 		/*
685 		 * Make sure space for the drain appears.  We know we can do
686 		 * this because max_hw_segments has been adjusted to be one
687 		 * fewer than the device can handle.
688 		 */
689 		rq->nr_phys_segments++;
690 	}
691 }
692 EXPORT_SYMBOL(blk_mq_start_request);
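/*
 * Illustrative sketch (hypothetical driver): every ->queue_rq()
 * implementation is expected to call blk_mq_start_request() before
 * handing the request to hardware, so the timeout code sees a coherent
 * gstate/deadline pair:
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (!my_submit_to_hw(hctx->driver_data, rq))
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */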
693 
694 /*
695  * When we reach here because the queue is busy, it's safe to change the state
696  * to IDLE without checking @rq->aborted_gstate because we should still be
697  * holding the RCU read lock and thus protected against timeout.
698  */
699 static void __blk_mq_requeue_request(struct request *rq)
700 {
701 	struct request_queue *q = rq->q;
702 
703 	blk_mq_put_driver_tag(rq);
704 
705 	trace_block_rq_requeue(q, rq);
706 	wbt_requeue(q->rq_wb, &rq->issue_stat);
707 
708 	if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
709 		blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
710 		if (q->dma_drain_size && blk_rq_bytes(rq))
711 			rq->nr_phys_segments--;
712 	}
713 }
714 
715 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
716 {
717 	__blk_mq_requeue_request(rq);
718 
719 	/* this request will be re-inserted into the I/O scheduler queue */
720 	blk_mq_sched_requeue_request(rq);
721 
722 	BUG_ON(blk_queued_rq(rq));
723 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
724 }
725 EXPORT_SYMBOL(blk_mq_requeue_request);
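/*
 * Illustrative sketch (hypothetical names): a driver that has to give a
 * request back to the block layer, e.g. after a path failure, requeues
 * it without kicking the list and kicks once recovery is under way:
 *
 *	blk_mq_requeue_request(rq, false);
 *	my_start_failover(ctrl);
 *	blk_mq_kick_requeue_list(rq->q);
 */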
726 
727 static void blk_mq_requeue_work(struct work_struct *work)
728 {
729 	struct request_queue *q =
730 		container_of(work, struct request_queue, requeue_work.work);
731 	LIST_HEAD(rq_list);
732 	struct request *rq, *next;
733 
734 	spin_lock_irq(&q->requeue_lock);
735 	list_splice_init(&q->requeue_list, &rq_list);
736 	spin_unlock_irq(&q->requeue_lock);
737 
738 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
739 		if (!(rq->rq_flags & RQF_SOFTBARRIER))
740 			continue;
741 
742 		rq->rq_flags &= ~RQF_SOFTBARRIER;
743 		list_del_init(&rq->queuelist);
744 		blk_mq_sched_insert_request(rq, true, false, false);
745 	}
746 
747 	while (!list_empty(&rq_list)) {
748 		rq = list_entry(rq_list.next, struct request, queuelist);
749 		list_del_init(&rq->queuelist);
750 		blk_mq_sched_insert_request(rq, false, false, false);
751 	}
752 
753 	blk_mq_run_hw_queues(q, false);
754 }
755 
756 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
757 				bool kick_requeue_list)
758 {
759 	struct request_queue *q = rq->q;
760 	unsigned long flags;
761 
762 	/*
763 	 * We abuse this flag that is otherwise used by the I/O scheduler to
764 	 * request head insertion from the workqueue.
765 	 */
766 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
767 
768 	spin_lock_irqsave(&q->requeue_lock, flags);
769 	if (at_head) {
770 		rq->rq_flags |= RQF_SOFTBARRIER;
771 		list_add(&rq->queuelist, &q->requeue_list);
772 	} else {
773 		list_add_tail(&rq->queuelist, &q->requeue_list);
774 	}
775 	spin_unlock_irqrestore(&q->requeue_lock, flags);
776 
777 	if (kick_requeue_list)
778 		blk_mq_kick_requeue_list(q);
779 }
780 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
781 
782 void blk_mq_kick_requeue_list(struct request_queue *q)
783 {
784 	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
785 }
786 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
787 
788 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
789 				    unsigned long msecs)
790 {
791 	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
792 				    msecs_to_jiffies(msecs));
793 }
794 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
795 
796 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
797 {
798 	if (tag < tags->nr_tags) {
799 		prefetch(tags->rqs[tag]);
800 		return tags->rqs[tag];
801 	}
802 
803 	return NULL;
804 }
805 EXPORT_SYMBOL(blk_mq_tag_to_rq);
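/*
 * Illustrative sketch (hypothetical names): drivers whose hardware
 * reports completions by tag translate the tag back into a request
 * before completing it:
 *
 *	rq = blk_mq_tag_to_rq(hctx->tags, cqe->tag);
 *	if (rq)
 *		blk_mq_complete_request(rq);
 */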
806 
807 struct blk_mq_timeout_data {
808 	unsigned long next;
809 	unsigned int next_set;
810 	unsigned int nr_expired;
811 };
812 
813 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
814 {
815 	const struct blk_mq_ops *ops = req->q->mq_ops;
816 	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
817 
818 	req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
819 
820 	if (ops->timeout)
821 		ret = ops->timeout(req, reserved);
822 
823 	switch (ret) {
824 	case BLK_EH_HANDLED:
825 		__blk_mq_complete_request(req);
826 		break;
827 	case BLK_EH_RESET_TIMER:
828 		/*
829 		 * As nothing prevents a completion from happening while
830 		 * ->aborted_gstate is set, this may lead to ignored
831 		 * completions and further spurious timeouts.
832 		 */
833 		blk_mq_rq_update_aborted_gstate(req, 0);
834 		blk_add_timer(req);
835 		break;
836 	case BLK_EH_NOT_HANDLED:
837 		break;
838 	default:
839 		printk(KERN_ERR "block: bad eh return: %d\n", ret);
840 		break;
841 	}
842 }
843 
844 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
845 		struct request *rq, void *priv, bool reserved)
846 {
847 	struct blk_mq_timeout_data *data = priv;
848 	unsigned long gstate, deadline;
849 	int start;
850 
851 	might_sleep();
852 
853 	if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED)
854 		return;
855 
856 	/* read coherent snapshots of @rq->gstate and @rq->deadline */
857 	while (true) {
858 		start = read_seqcount_begin(&rq->gstate_seq);
859 		gstate = READ_ONCE(rq->gstate);
860 		deadline = blk_rq_deadline(rq);
861 		if (!read_seqcount_retry(&rq->gstate_seq, start))
862 			break;
863 		cond_resched();
864 	}
865 
866 	/* if in-flight && overdue, mark for abortion */
867 	if ((gstate & MQ_RQ_STATE_MASK) == MQ_RQ_IN_FLIGHT &&
868 	    time_after_eq(jiffies, deadline)) {
869 		blk_mq_rq_update_aborted_gstate(rq, gstate);
870 		data->nr_expired++;
871 		hctx->nr_expired++;
872 	} else if (!data->next_set || time_after(data->next, deadline)) {
873 		data->next = deadline;
874 		data->next_set = 1;
875 	}
876 }
877 
878 static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
879 		struct request *rq, void *priv, bool reserved)
880 {
881 	/*
882 	 * We marked @rq->aborted_gstate and waited for RCU.  If there were
883 	 * completions that we lost to, they would have finished and
884 	 * updated @rq->gstate by now; otherwise, the completion path is
885 	 * now guaranteed to see @rq->aborted_gstate and yield.  If
886 	 * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
887 	 */
888 	if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
889 	    READ_ONCE(rq->gstate) == rq->aborted_gstate)
890 		blk_mq_rq_timed_out(rq, reserved);
891 }
892 
893 static void blk_mq_timeout_work(struct work_struct *work)
894 {
895 	struct request_queue *q =
896 		container_of(work, struct request_queue, timeout_work);
897 	struct blk_mq_timeout_data data = {
898 		.next		= 0,
899 		.next_set	= 0,
900 		.nr_expired	= 0,
901 	};
902 	struct blk_mq_hw_ctx *hctx;
903 	int i;
904 
905 	/* A deadlock might occur if a request is stuck requiring a
906 	 * timeout at the same time a queue freeze is waiting for
907 	 * completion, since the timeout code would not be able to
908 	 * acquire the queue reference here.
909 	 *
910 	 * That's why we don't use blk_queue_enter here; instead, we use
911 	 * percpu_ref_tryget directly, because we need to be able to
912 	 * obtain a reference even in the short window between the queue
913 	 * starting to freeze, by dropping the first reference in
914 	 * blk_freeze_queue_start, and the moment the last request is
915 	 * consumed, marked by the instant q_usage_counter reaches
916 	 * zero.
917 	 */
918 	if (!percpu_ref_tryget(&q->q_usage_counter))
919 		return;
920 
921 	/* scan for the expired ones and set their ->aborted_gstate */
922 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
923 
924 	if (data.nr_expired) {
925 		bool has_rcu = false;
926 
927 		/*
928 		 * Wait till everyone sees ->aborted_gstate.  The
929 		 * sequential waits for SRCUs aren't ideal.  If this ever
930 		 * becomes a problem, we can add per-hw_ctx rcu_head and
931 		 * wait in parallel.
932 		 */
933 		queue_for_each_hw_ctx(q, hctx, i) {
934 			if (!hctx->nr_expired)
935 				continue;
936 
937 			if (!(hctx->flags & BLK_MQ_F_BLOCKING))
938 				has_rcu = true;
939 			else
940 				synchronize_srcu(hctx->srcu);
941 
942 			hctx->nr_expired = 0;
943 		}
944 		if (has_rcu)
945 			synchronize_rcu();
946 
947 		/* terminate the ones we won */
948 		blk_mq_queue_tag_busy_iter(q, blk_mq_terminate_expired, NULL);
949 	}
950 
951 	if (data.next_set) {
952 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
953 		mod_timer(&q->timeout, data.next);
954 	} else {
955 		/*
956 		 * Request timeouts are handled as a forward rolling timer. If
957 		 * we end up here it means that no requests are pending and
958 		 * also that no request has been pending for a while. Mark
959 		 * each hctx as idle.
960 		 */
961 		queue_for_each_hw_ctx(q, hctx, i) {
962 			/* the hctx may be unmapped, so check it here */
963 			if (blk_mq_hw_queue_mapped(hctx))
964 				blk_mq_tag_idle(hctx);
965 		}
966 	}
967 	blk_queue_exit(q);
968 }
969 
970 struct flush_busy_ctx_data {
971 	struct blk_mq_hw_ctx *hctx;
972 	struct list_head *list;
973 };
974 
975 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
976 {
977 	struct flush_busy_ctx_data *flush_data = data;
978 	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
979 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
980 
981 	spin_lock(&ctx->lock);
982 	list_splice_tail_init(&ctx->rq_list, flush_data->list);
983 	sbitmap_clear_bit(sb, bitnr);
984 	spin_unlock(&ctx->lock);
985 	return true;
986 }
987 
988 /*
989  * Process software queues that have been marked busy, splicing them
990  * to the for-dispatch list.
991  */
992 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
993 {
994 	struct flush_busy_ctx_data data = {
995 		.hctx = hctx,
996 		.list = list,
997 	};
998 
999 	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1000 }
1001 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1002 
1003 struct dispatch_rq_data {
1004 	struct blk_mq_hw_ctx *hctx;
1005 	struct request *rq;
1006 };
1007 
1008 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1009 		void *data)
1010 {
1011 	struct dispatch_rq_data *dispatch_data = data;
1012 	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1013 	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1014 
1015 	spin_lock(&ctx->lock);
1016 	if (unlikely(!list_empty(&ctx->rq_list))) {
1017 		dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
1018 		list_del_init(&dispatch_data->rq->queuelist);
1019 		if (list_empty(&ctx->rq_list))
1020 			sbitmap_clear_bit(sb, bitnr);
1021 	}
1022 	spin_unlock(&ctx->lock);
1023 
1024 	return !dispatch_data->rq;
1025 }
1026 
1027 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1028 					struct blk_mq_ctx *start)
1029 {
1030 	unsigned off = start ? start->index_hw : 0;
1031 	struct dispatch_rq_data data = {
1032 		.hctx = hctx,
1033 		.rq   = NULL,
1034 	};
1035 
1036 	__sbitmap_for_each_set(&hctx->ctx_map, off,
1037 			       dispatch_rq_from_ctx, &data);
1038 
1039 	return data.rq;
1040 }
1041 
1042 static inline unsigned int queued_to_index(unsigned int queued)
1043 {
1044 	if (!queued)
1045 		return 0;
1046 
1047 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
1048 }
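/*
 * Worked example (illustrative): 0 queued requests map to index 0,
 * 1 maps to 1, 2-3 map to 2, 4-7 map to 3, and so on; anything at or
 * beyond 2^(BLK_MQ_MAX_DISPATCH_ORDER - 2) is clamped into the last
 * bucket.
 */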
1049 
1050 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
1051 			   bool wait)
1052 {
1053 	struct blk_mq_alloc_data data = {
1054 		.q = rq->q,
1055 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
1056 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
1057 	};
1058 
1059 	might_sleep_if(wait);
1060 
1061 	if (rq->tag != -1)
1062 		goto done;
1063 
1064 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
1065 		data.flags |= BLK_MQ_REQ_RESERVED;
1066 
1067 	rq->tag = blk_mq_get_tag(&data);
1068 	if (rq->tag >= 0) {
1069 		if (blk_mq_tag_busy(data.hctx)) {
1070 			rq->rq_flags |= RQF_MQ_INFLIGHT;
1071 			atomic_inc(&data.hctx->nr_active);
1072 		}
1073 		data.hctx->tags->rqs[rq->tag] = rq;
1074 	}
1075 
1076 done:
1077 	if (hctx)
1078 		*hctx = data.hctx;
1079 	return rq->tag != -1;
1080 }
1081 
1082 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1083 				int flags, void *key)
1084 {
1085 	struct blk_mq_hw_ctx *hctx;
1086 
1087 	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1088 
1089 	list_del_init(&wait->entry);
1090 	blk_mq_run_hw_queue(hctx, true);
1091 	return 1;
1092 }
1093 
1094 /*
1095  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1096  * the tag wakeups. For non-shared tags, we can simply mark ourselves as
1097  * needing a restart. In both cases, take care to check the condition
1098  * again after marking ourselves as waiting.
1099  */
1100 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
1101 				 struct request *rq)
1102 {
1103 	struct blk_mq_hw_ctx *this_hctx = *hctx;
1104 	struct sbq_wait_state *ws;
1105 	wait_queue_entry_t *wait;
1106 	bool ret;
1107 
1108 	if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
1109 		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
1110 			set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
1111 
1112 		/*
1113 		 * It's possible that a tag was freed in the window between the
1114 		 * allocation failure and adding the hardware queue to the wait
1115 		 * queue.
1116 		 *
1117 		 * Don't clear RESTART here, someone else could have set it.
1118 		 * At most this will cost an extra queue run.
1119 		 */
1120 		return blk_mq_get_driver_tag(rq, hctx, false);
1121 	}
1122 
1123 	wait = &this_hctx->dispatch_wait;
1124 	if (!list_empty_careful(&wait->entry))
1125 		return false;
1126 
1127 	spin_lock(&this_hctx->lock);
1128 	if (!list_empty(&wait->entry)) {
1129 		spin_unlock(&this_hctx->lock);
1130 		return false;
1131 	}
1132 
1133 	ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
1134 	add_wait_queue(&ws->wait, wait);
1135 
1136 	/*
1137 	 * It's possible that a tag was freed in the window between the
1138 	 * allocation failure and adding the hardware queue to the wait
1139 	 * queue.
1140 	 */
1141 	ret = blk_mq_get_driver_tag(rq, hctx, false);
1142 	if (!ret) {
1143 		spin_unlock(&this_hctx->lock);
1144 		return false;
1145 	}
1146 
1147 	/*
1148 	 * We got a tag, remove ourselves from the wait queue to ensure
1149 	 * someone else gets the wakeup.
1150 	 */
1151 	spin_lock_irq(&ws->wait.lock);
1152 	list_del_init(&wait->entry);
1153 	spin_unlock_irq(&ws->wait.lock);
1154 	spin_unlock(&this_hctx->lock);
1155 
1156 	return true;
1157 }
1158 
1159 #define BLK_MQ_RESOURCE_DELAY	3		/* ms units */
1160 
1161 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1162 			     bool got_budget)
1163 {
1164 	struct blk_mq_hw_ctx *hctx;
1165 	struct request *rq, *nxt;
1166 	bool no_tag = false;
1167 	int errors, queued;
1168 	blk_status_t ret = BLK_STS_OK;
1169 
1170 	if (list_empty(list))
1171 		return false;
1172 
1173 	WARN_ON(!list_is_singular(list) && got_budget);
1174 
1175 	/*
1176 	 * Now process all the entries, sending them to the driver.
1177 	 */
1178 	errors = queued = 0;
1179 	do {
1180 		struct blk_mq_queue_data bd;
1181 
1182 		rq = list_first_entry(list, struct request, queuelist);
1183 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
1184 			/*
1185 			 * The initial allocation attempt failed, so we need to
1186 			 * rerun the hardware queue when a tag is freed. The
1187 			 * waitqueue takes care of that. If the queue is run
1188 			 * before we add this entry back on the dispatch list,
1189 			 * we'll re-run it below.
1190 			 */
1191 			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
1192 				if (got_budget)
1193 					blk_mq_put_dispatch_budget(hctx);
1194 				/*
1195 				 * For non-shared tags, the RESTART check
1196 				 * will suffice.
1197 				 */
1198 				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
1199 					no_tag = true;
1200 				break;
1201 			}
1202 		}
1203 
1204 		if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
1205 			blk_mq_put_driver_tag(rq);
1206 			break;
1207 		}
1208 
1209 		list_del_init(&rq->queuelist);
1210 
1211 		bd.rq = rq;
1212 
1213 		/*
1214 		 * Flag last if we have no more requests, or if we have more
1215 		 * but can't assign a driver tag to it.
1216 		 */
1217 		if (list_empty(list))
1218 			bd.last = true;
1219 		else {
1220 			nxt = list_first_entry(list, struct request, queuelist);
1221 			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
1222 		}
1223 
1224 		ret = q->mq_ops->queue_rq(hctx, &bd);
1225 		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
1226 			/*
1227 			 * If an I/O scheduler has been configured and we got a
1228 			 * driver tag for the next request already, free it
1229 			 * again.
1230 			 */
1231 			if (!list_empty(list)) {
1232 				nxt = list_first_entry(list, struct request, queuelist);
1233 				blk_mq_put_driver_tag(nxt);
1234 			}
1235 			list_add(&rq->queuelist, list);
1236 			__blk_mq_requeue_request(rq);
1237 			break;
1238 		}
1239 
1240 		if (unlikely(ret != BLK_STS_OK)) {
1241 			errors++;
1242 			blk_mq_end_request(rq, BLK_STS_IOERR);
1243 			continue;
1244 		}
1245 
1246 		queued++;
1247 	} while (!list_empty(list));
1248 
1249 	hctx->dispatched[queued_to_index(queued)]++;
1250 
1251 	/*
1252 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
1253 	 * that is where we will continue on next queue run.
1254 	 */
1255 	if (!list_empty(list)) {
1256 		bool needs_restart;
1257 
1258 		spin_lock(&hctx->lock);
1259 		list_splice_init(list, &hctx->dispatch);
1260 		spin_unlock(&hctx->lock);
1261 
1262 		/*
1263 		 * If SCHED_RESTART was set by the caller of this function and
1264 		 * it is no longer set that means that it was cleared by another
1265 		 * thread and hence that a queue rerun is needed.
1266 		 *
1267 		 * If 'no_tag' is set, that means that we failed getting
1268 		 * a driver tag with an I/O scheduler attached. If our dispatch
1269 		 * waitqueue is no longer active, ensure that we run the queue
1270 		 * AFTER adding our entries back to the list.
1271 		 *
1272 		 * If no I/O scheduler has been configured it is possible that
1273 		 * the hardware queue got stopped and restarted before requests
1274 		 * were pushed back onto the dispatch list. Rerun the queue to
1275 		 * avoid starvation. Notes:
1276 		 * - blk_mq_run_hw_queue() checks whether or not a queue has
1277 		 *   been stopped before rerunning a queue.
1278 		 * - Some but not all block drivers stop a queue before
1279 		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1280 		 *   and dm-rq.
1281 		 *
1282 		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
1283 		 * bit is set, run queue after a delay to avoid IO stalls
1284 		 * that could otherwise occur if the queue is idle.
1285 		 */
1286 		needs_restart = blk_mq_sched_needs_restart(hctx);
1287 		if (!needs_restart ||
1288 		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1289 			blk_mq_run_hw_queue(hctx, true);
1290 		else if (needs_restart && (ret == BLK_STS_RESOURCE))
1291 			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1292 	}
1293 
1294 	return (queued + errors) != 0;
1295 }
1296 
1297 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1298 {
1299 	int srcu_idx;
1300 
1301 	/*
1302 	 * We should be running this queue from one of the CPUs that
1303 	 * are mapped to it.
1304 	 *
1305 	 * There are at least two related races now between setting
1306 	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
1307 	 * __blk_mq_run_hw_queue():
1308 	 *
1309 	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
1310 	 *   but later comes online; in that case this warning is
1311 	 *   harmless
1312 	 *
1313 	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
1314 	 *   but later goes offline; in that case the warning can't be
1315 	 *   triggered, and we depend on the blk-mq timeout handler to
1316 	 *   handle requests dispatched to this hctx
1317 	 */
1318 	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1319 		cpu_online(hctx->next_cpu)) {
1320 		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1321 			raw_smp_processor_id(),
1322 			cpumask_empty(hctx->cpumask) ? "inactive" : "active");
1323 		dump_stack();
1324 	}
1325 
1326 	/*
1327 	 * We can't run the queue inline with ints disabled. Ensure that
1328 	 * we catch bad users of this early.
1329 	 */
1330 	WARN_ON_ONCE(in_interrupt());
1331 
1332 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1333 
1334 	hctx_lock(hctx, &srcu_idx);
1335 	blk_mq_sched_dispatch_requests(hctx);
1336 	hctx_unlock(hctx, srcu_idx);
1337 }
1338 
1339 /*
1340  * It'd be great if the workqueue API had a way to pass
1341  * in a mask and had some smarts for more clever placement.
1342  * For now we just round-robin here, switching for every
1343  * BLK_MQ_CPU_WORK_BATCH queued items.
1344  */
1345 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1346 {
1347 	bool tried = false;
1348 
1349 	if (hctx->queue->nr_hw_queues == 1)
1350 		return WORK_CPU_UNBOUND;
1351 
1352 	if (--hctx->next_cpu_batch <= 0) {
1353 		int next_cpu;
1354 select_cpu:
1355 		next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
1356 				cpu_online_mask);
1357 		if (next_cpu >= nr_cpu_ids)
1358 			next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
1359 
1360 		/*
1361 		 * No online CPU was found, so make sure hctx->next_cpu is set
1362 		 * to a valid CPU so that the workqueue machinery isn't broken.
1363 		 */
1364 		if (next_cpu >= nr_cpu_ids)
1365 			hctx->next_cpu = cpumask_first(hctx->cpumask);
1366 		else
1367 			hctx->next_cpu = next_cpu;
1368 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1369 	}
1370 
1371 	/*
1372 	 * Fall back to an unbound schedule if we can't find an online CPU for
1373 	 * this hctx; that should only happen while handling CPU DEAD.
1374 	 */
1375 	if (!cpu_online(hctx->next_cpu)) {
1376 		if (!tried) {
1377 			tried = true;
1378 			goto select_cpu;
1379 		}
1380 
1381 		/*
1382 		 * Make sure to re-select the CPU next time once CPUs in
1383 		 * hctx->cpumask become online again.
1384 		 */
1385 		hctx->next_cpu_batch = 1;
1386 		return WORK_CPU_UNBOUND;
1387 	}
1388 	return hctx->next_cpu;
1389 }
1390 
1391 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1392 					unsigned long msecs)
1393 {
1394 	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1395 		return;
1396 
1397 	if (unlikely(blk_mq_hctx_stopped(hctx)))
1398 		return;
1399 
1400 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1401 		int cpu = get_cpu();
1402 		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1403 			__blk_mq_run_hw_queue(hctx);
1404 			put_cpu();
1405 			return;
1406 		}
1407 
1408 		put_cpu();
1409 	}
1410 
1411 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
1412 				    msecs_to_jiffies(msecs));
1413 }
1414 
1415 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1416 {
1417 	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
1418 }
1419 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
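/*
 * Illustrative sketch (hypothetical names): a driver that returned
 * BLK_STS_RESOURCE from ->queue_rq() because a per-queue resource ran
 * out typically kicks the hardware queue again once the resource is
 * freed, e.g. from its completion path:
 *
 *	my_free_hw_slot(ctrl, slot);
 *	blk_mq_delay_run_hw_queue(hctx, 0);
 */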
1420 
1421 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1422 {
1423 	int srcu_idx;
1424 	bool need_run;
1425 
1426 	/*
1427 	 * When the queue is quiesced, we may be switching the io scheduler,
1428 	 * updating nr_hw_queues, or doing other work, and we can't run the
1429 	 * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
1430 	 *
1431 	 * The queue will be rerun by blk_mq_unquiesce_queue() if it is
1432 	 * quiesced.
1433 	 */
1434 	hctx_lock(hctx, &srcu_idx);
1435 	need_run = !blk_queue_quiesced(hctx->queue) &&
1436 		blk_mq_hctx_has_pending(hctx);
1437 	hctx_unlock(hctx, srcu_idx);
1438 
1439 	if (need_run) {
1440 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
1441 		return true;
1442 	}
1443 
1444 	return false;
1445 }
1446 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1447 
1448 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1449 {
1450 	struct blk_mq_hw_ctx *hctx;
1451 	int i;
1452 
1453 	queue_for_each_hw_ctx(q, hctx, i) {
1454 		if (blk_mq_hctx_stopped(hctx))
1455 			continue;
1456 
1457 		blk_mq_run_hw_queue(hctx, async);
1458 	}
1459 }
1460 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1461 
1462 /**
1463  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1464  * @q: request queue.
1465  *
1466  * The caller is responsible for serializing this function against
1467  * blk_mq_{start,stop}_hw_queue().
1468  */
1469 bool blk_mq_queue_stopped(struct request_queue *q)
1470 {
1471 	struct blk_mq_hw_ctx *hctx;
1472 	int i;
1473 
1474 	queue_for_each_hw_ctx(q, hctx, i)
1475 		if (blk_mq_hctx_stopped(hctx))
1476 			return true;
1477 
1478 	return false;
1479 }
1480 EXPORT_SYMBOL(blk_mq_queue_stopped);
1481 
1482 /*
1483  * This function is often used by drivers to pause .queue_rq() when there
1484  * aren't enough resources or some condition isn't satisfied; in that case
1485  * BLK_STS_RESOURCE is usually returned.
1486  *
1487  * We do not guarantee that dispatch can be drained or blocked
1488  * after blk_mq_stop_hw_queue() returns. Please use
1489  * blk_mq_quiesce_queue() for that requirement.
1490  */
1491 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1492 {
1493 	cancel_delayed_work(&hctx->run_work);
1494 
1495 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1496 }
1497 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1498 
1499 /*
1500  * This function is often used by drivers to pause .queue_rq() when there
1501  * aren't enough resources or some condition isn't satisfied; in that case
1502  * BLK_STS_RESOURCE is usually returned.
1503  *
1504  * We do not guarantee that dispatch can be drained or blocked
1505  * after blk_mq_stop_hw_queues() returns. Please use
1506  * blk_mq_quiesce_queue() for that requirement.
1507  */
1508 void blk_mq_stop_hw_queues(struct request_queue *q)
1509 {
1510 	struct blk_mq_hw_ctx *hctx;
1511 	int i;
1512 
1513 	queue_for_each_hw_ctx(q, hctx, i)
1514 		blk_mq_stop_hw_queue(hctx);
1515 }
1516 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1517 
1518 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1519 {
1520 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1521 
1522 	blk_mq_run_hw_queue(hctx, false);
1523 }
1524 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1525 
1526 void blk_mq_start_hw_queues(struct request_queue *q)
1527 {
1528 	struct blk_mq_hw_ctx *hctx;
1529 	int i;
1530 
1531 	queue_for_each_hw_ctx(q, hctx, i)
1532 		blk_mq_start_hw_queue(hctx);
1533 }
1534 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1535 
1536 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1537 {
1538 	if (!blk_mq_hctx_stopped(hctx))
1539 		return;
1540 
1541 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1542 	blk_mq_run_hw_queue(hctx, async);
1543 }
1544 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1545 
1546 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1547 {
1548 	struct blk_mq_hw_ctx *hctx;
1549 	int i;
1550 
1551 	queue_for_each_hw_ctx(q, hctx, i)
1552 		blk_mq_start_stopped_hw_queue(hctx, async);
1553 }
1554 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
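/*
 * Illustrative sketch (hypothetical names): the stop/start_stopped pair
 * is typically used around a temporary loss of the device, e.g. in a
 * reconnect path:
 *
 *	blk_mq_stop_hw_queues(my_dev->queue);
 *	my_reconnect(my_dev);
 *	blk_mq_start_stopped_hw_queues(my_dev->queue, true);
 */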
1555 
1556 static void blk_mq_run_work_fn(struct work_struct *work)
1557 {
1558 	struct blk_mq_hw_ctx *hctx;
1559 
1560 	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1561 
1562 	/*
1563 	 * If we are stopped, don't run the queue. The exception is if
1564 	 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
1565 	 * the STOPPED bit and run it.
1566 	 */
1567 	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1568 		if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1569 			return;
1570 
1571 		clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1572 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1573 	}
1574 
1575 	__blk_mq_run_hw_queue(hctx);
1576 }
1577 
1578 
1579 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1580 {
1581 	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1582 		return;
1583 
1584 	/*
1585 	 * Stop the hw queue, then modify the currently delayed work.
1586 	 * This should prevent us from running the queue prematurely.
1587 	 * Mark the queue as auto-clearing STOPPED when it runs.
1588 	 */
1589 	blk_mq_stop_hw_queue(hctx);
1590 	set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1591 	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1592 					&hctx->run_work,
1593 					msecs_to_jiffies(msecs));
1594 }
1595 EXPORT_SYMBOL(blk_mq_delay_queue);
1596 
1597 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1598 					    struct request *rq,
1599 					    bool at_head)
1600 {
1601 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1602 
1603 	lockdep_assert_held(&ctx->lock);
1604 
1605 	trace_block_rq_insert(hctx->queue, rq);
1606 
1607 	if (at_head)
1608 		list_add(&rq->queuelist, &ctx->rq_list);
1609 	else
1610 		list_add_tail(&rq->queuelist, &ctx->rq_list);
1611 }
1612 
1613 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1614 			     bool at_head)
1615 {
1616 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1617 
1618 	lockdep_assert_held(&ctx->lock);
1619 
1620 	__blk_mq_insert_req_list(hctx, rq, at_head);
1621 	blk_mq_hctx_mark_pending(hctx, ctx);
1622 }
1623 
1624 /*
1625  * Should only be used carefully, when the caller knows we want to
1626  * bypass a potential IO scheduler on the target device.
1627  */
1628 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1629 {
1630 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1631 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1632 
1633 	spin_lock(&hctx->lock);
1634 	list_add_tail(&rq->queuelist, &hctx->dispatch);
1635 	spin_unlock(&hctx->lock);
1636 
1637 	if (run_queue)
1638 		blk_mq_run_hw_queue(hctx, false);
1639 }
1640 
1641 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1642 			    struct list_head *list)
1643 
1644 {
1645 	/*
1646 	 * Preemption doesn't flush the plug list, so it's possible that
1647 	 * ctx->cpu is offline by now.
1648 	 */
1649 	spin_lock(&ctx->lock);
1650 	while (!list_empty(list)) {
1651 		struct request *rq;
1652 
1653 		rq = list_first_entry(list, struct request, queuelist);
1654 		BUG_ON(rq->mq_ctx != ctx);
1655 		list_del_init(&rq->queuelist);
1656 		__blk_mq_insert_req_list(hctx, rq, false);
1657 	}
1658 	blk_mq_hctx_mark_pending(hctx, ctx);
1659 	spin_unlock(&ctx->lock);
1660 }
1661 
1662 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1663 {
1664 	struct request *rqa = container_of(a, struct request, queuelist);
1665 	struct request *rqb = container_of(b, struct request, queuelist);
1666 
1667 	return !(rqa->mq_ctx < rqb->mq_ctx ||
1668 		 (rqa->mq_ctx == rqb->mq_ctx &&
1669 		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1670 }
1671 
1672 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1673 {
1674 	struct blk_mq_ctx *this_ctx;
1675 	struct request_queue *this_q;
1676 	struct request *rq;
1677 	LIST_HEAD(list);
1678 	LIST_HEAD(ctx_list);
1679 	unsigned int depth;
1680 
1681 	list_splice_init(&plug->mq_list, &list);
1682 
1683 	list_sort(NULL, &list, plug_ctx_cmp);
1684 
1685 	this_q = NULL;
1686 	this_ctx = NULL;
1687 	depth = 0;
1688 
1689 	while (!list_empty(&list)) {
1690 		rq = list_entry_rq(list.next);
1691 		list_del_init(&rq->queuelist);
1692 		BUG_ON(!rq->q);
1693 		if (rq->mq_ctx != this_ctx) {
1694 			if (this_ctx) {
1695 				trace_block_unplug(this_q, depth, from_schedule);
1696 				blk_mq_sched_insert_requests(this_q, this_ctx,
1697 								&ctx_list,
1698 								from_schedule);
1699 			}
1700 
1701 			this_ctx = rq->mq_ctx;
1702 			this_q = rq->q;
1703 			depth = 0;
1704 		}
1705 
1706 		depth++;
1707 		list_add_tail(&rq->queuelist, &ctx_list);
1708 	}
1709 
1710 	/*
1711 	 * If 'this_ctx' is set, we know we have entries to complete
1712 	 * on 'ctx_list'. Do those.
1713 	 */
1714 	if (this_ctx) {
1715 		trace_block_unplug(this_q, depth, from_schedule);
1716 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1717 						from_schedule);
1718 	}
1719 }
1720 
1721 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1722 {
1723 	blk_init_request_from_bio(rq, bio);
1724 
1725 	blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
1726 
1727 	blk_account_io_start(rq, true);
1728 }
1729 
1730 static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1731 				   struct blk_mq_ctx *ctx,
1732 				   struct request *rq)
1733 {
1734 	spin_lock(&ctx->lock);
1735 	__blk_mq_insert_request(hctx, rq, false);
1736 	spin_unlock(&ctx->lock);
1737 }
1738 
1739 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1740 {
1741 	if (rq->tag != -1)
1742 		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1743 
1744 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1745 }
1746 
1747 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1748 					    struct request *rq,
1749 					    blk_qc_t *cookie)
1750 {
1751 	struct request_queue *q = rq->q;
1752 	struct blk_mq_queue_data bd = {
1753 		.rq = rq,
1754 		.last = true,
1755 	};
1756 	blk_qc_t new_cookie;
1757 	blk_status_t ret;
1758 
1759 	new_cookie = request_to_qc_t(hctx, rq);
1760 
1761 	/*
1762 	 * If the queue returns OK, we are done. On a hard error, the caller
1763 	 * may kill the request. On a busy return, just add it back to our
1764 	 * list as we previously would have done.
1765 	 */
1766 	ret = q->mq_ops->queue_rq(hctx, &bd);
1767 	switch (ret) {
1768 	case BLK_STS_OK:
1769 		*cookie = new_cookie;
1770 		break;
1771 	case BLK_STS_RESOURCE:
1772 	case BLK_STS_DEV_RESOURCE:
1773 		__blk_mq_requeue_request(rq);
1774 		break;
1775 	default:
1776 		*cookie = BLK_QC_T_NONE;
1777 		break;
1778 	}
1779 
1780 	return ret;
1781 }
1782 
1783 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1784 						struct request *rq,
1785 						blk_qc_t *cookie,
1786 						bool bypass_insert)
1787 {
1788 	struct request_queue *q = rq->q;
1789 	bool run_queue = true;
1790 
1791 	/*
1792 	 * RCU or SRCU read lock is needed before checking quiesced flag.
1793 	 *
1794 	 * When the queue is stopped or quiesced, ignore 'bypass_insert' from
1795 	 * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller
1796 	 * so that the driver does not try to dispatch again.
1797 	 */
1798 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1799 		run_queue = false;
1800 		bypass_insert = false;
1801 		goto insert;
1802 	}
1803 
1804 	if (q->elevator && !bypass_insert)
1805 		goto insert;
1806 
1807 	if (!blk_mq_get_driver_tag(rq, NULL, false))
1808 		goto insert;
1809 
1810 	if (!blk_mq_get_dispatch_budget(hctx)) {
1811 		blk_mq_put_driver_tag(rq);
1812 		goto insert;
1813 	}
1814 
1815 	return __blk_mq_issue_directly(hctx, rq, cookie);
1816 insert:
1817 	if (bypass_insert)
1818 		return BLK_STS_RESOURCE;
1819 
1820 	blk_mq_sched_insert_request(rq, false, run_queue, false);
1821 	return BLK_STS_OK;
1822 }
1823 
1824 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1825 		struct request *rq, blk_qc_t *cookie)
1826 {
1827 	blk_status_t ret;
1828 	int srcu_idx;
1829 
1830 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1831 
1832 	hctx_lock(hctx, &srcu_idx);
1833 
1834 	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
1835 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
1836 		blk_mq_sched_insert_request(rq, false, true, false);
1837 	else if (ret != BLK_STS_OK)
1838 		blk_mq_end_request(rq, ret);
1839 
1840 	hctx_unlock(hctx, srcu_idx);
1841 }
1842 
1843 blk_status_t blk_mq_request_issue_directly(struct request *rq)
1844 {
1845 	blk_status_t ret;
1846 	int srcu_idx;
1847 	blk_qc_t unused_cookie;
1848 	struct blk_mq_ctx *ctx = rq->mq_ctx;
1849 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1850 
1851 	hctx_lock(hctx, &srcu_idx);
1852 	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
1853 	hctx_unlock(hctx, srcu_idx);
1854 
1855 	return ret;
1856 }
1857 
1858 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1859 {
1860 	const int is_sync = op_is_sync(bio->bi_opf);
1861 	const int is_flush_fua = op_is_flush(bio->bi_opf);
1862 	struct blk_mq_alloc_data data = { .flags = 0 };
1863 	struct request *rq;
1864 	unsigned int request_count = 0;
1865 	struct blk_plug *plug;
1866 	struct request *same_queue_rq = NULL;
1867 	blk_qc_t cookie;
1868 	unsigned int wb_acct;
1869 
1870 	blk_queue_bounce(q, &bio);
1871 
1872 	blk_queue_split(q, &bio);
1873 
1874 	if (!bio_integrity_prep(bio))
1875 		return BLK_QC_T_NONE;
1876 
1877 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
1878 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1879 		return BLK_QC_T_NONE;
1880 
1881 	if (blk_mq_sched_bio_merge(q, bio))
1882 		return BLK_QC_T_NONE;
1883 
1884 	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1885 
1886 	trace_block_getrq(q, bio, bio->bi_opf);
1887 
1888 	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
1889 	if (unlikely(!rq)) {
1890 		__wbt_done(q->rq_wb, wb_acct);
1891 		if (bio->bi_opf & REQ_NOWAIT)
1892 			bio_wouldblock_error(bio);
1893 		return BLK_QC_T_NONE;
1894 	}
1895 
1896 	wbt_track(&rq->issue_stat, wb_acct);
1897 
1898 	cookie = request_to_qc_t(data.hctx, rq);
1899 
1900 	plug = current->plug;
1901 	if (unlikely(is_flush_fua)) {
1902 		blk_mq_put_ctx(data.ctx);
1903 		blk_mq_bio_to_request(rq, bio);
1904 
1905 		/* bypass scheduler for flush rq */
1906 		blk_insert_flush(rq);
1907 		blk_mq_run_hw_queue(data.hctx, true);
1908 	} else if (plug && q->nr_hw_queues == 1) {
1909 		struct request *last = NULL;
1910 
1911 		blk_mq_put_ctx(data.ctx);
1912 		blk_mq_bio_to_request(rq, bio);
1913 
1914 		/*
1915 		 * @request_count may have become stale because we were
1916 		 * scheduled out, so check the list again.
1917 		 */
1918 		if (list_empty(&plug->mq_list))
1919 			request_count = 0;
1920 		else if (blk_queue_nomerges(q))
1921 			request_count = blk_plug_queued_count(q);
1922 
1923 		if (!request_count)
1924 			trace_block_plug(q);
1925 		else
1926 			last = list_entry_rq(plug->mq_list.prev);
1927 
1928 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1929 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1930 			blk_flush_plug_list(plug, false);
1931 			trace_block_plug(q);
1932 		}
1933 
1934 		list_add_tail(&rq->queuelist, &plug->mq_list);
1935 	} else if (plug && !blk_queue_nomerges(q)) {
1936 		blk_mq_bio_to_request(rq, bio);
1937 
1938 		/*
1939 		 * We do limited plugging. If the bio can be merged, do that.
1940 		 * Otherwise the existing request in the plug list will be
1941 		 * issued, so the plug list will hold at most one request.
1942 		 * The plug list might have been flushed before this; if so,
1943 		 * the plug list is empty and same_queue_rq is invalid.
1944 		 */
1945 		if (list_empty(&plug->mq_list))
1946 			same_queue_rq = NULL;
1947 		if (same_queue_rq)
1948 			list_del_init(&same_queue_rq->queuelist);
1949 		list_add_tail(&rq->queuelist, &plug->mq_list);
1950 
1951 		blk_mq_put_ctx(data.ctx);
1952 
1953 		if (same_queue_rq) {
1954 			data.hctx = blk_mq_map_queue(q,
1955 					same_queue_rq->mq_ctx->cpu);
1956 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1957 					&cookie);
1958 		}
1959 	} else if (q->nr_hw_queues > 1 && is_sync) {
1960 		blk_mq_put_ctx(data.ctx);
1961 		blk_mq_bio_to_request(rq, bio);
1962 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
1963 	} else if (q->elevator) {
1964 		blk_mq_put_ctx(data.ctx);
1965 		blk_mq_bio_to_request(rq, bio);
1966 		blk_mq_sched_insert_request(rq, false, true, true);
1967 	} else {
1968 		blk_mq_put_ctx(data.ctx);
1969 		blk_mq_bio_to_request(rq, bio);
1970 		blk_mq_queue_io(data.hctx, data.ctx, rq);
1971 		blk_mq_run_hw_queue(data.hctx, true);
1972 	}
1973 
1974 	return cookie;
1975 }
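
/*
 * Summary of the dispatch decisions made in blk_mq_make_request() above
 * (a reader's map, not new behaviour):
 *
 *	flush/FUA			bypass the scheduler, go through
 *					blk_insert_flush() and kick the hw queue
 *	plug, single hw queue		batch on plug->mq_list; the plug is
 *					flushed once it holds
 *					BLK_MAX_REQUEST_COUNT requests or the
 *					previous request is at least
 *					BLK_PLUG_FLUSH_SIZE bytes
 *	plug, merges allowed		"limited plugging": keep at most one
 *					request per queue in the plug and issue
 *					the previous same-queue request directly
 *	multiple hw queues, sync	issue the request directly
 *	elevator attached		insert through the I/O scheduler
 *	otherwise			queue on the sw queue and run the
 *					hw queue
 */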
1976 
1977 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1978 		     unsigned int hctx_idx)
1979 {
1980 	struct page *page;
1981 
1982 	if (tags->rqs && set->ops->exit_request) {
1983 		int i;
1984 
1985 		for (i = 0; i < tags->nr_tags; i++) {
1986 			struct request *rq = tags->static_rqs[i];
1987 
1988 			if (!rq)
1989 				continue;
1990 			set->ops->exit_request(set, rq, hctx_idx);
1991 			tags->static_rqs[i] = NULL;
1992 		}
1993 	}
1994 
1995 	while (!list_empty(&tags->page_list)) {
1996 		page = list_first_entry(&tags->page_list, struct page, lru);
1997 		list_del_init(&page->lru);
1998 		/*
1999 		 * Remove kmemleak object previously allocated in
2000 		 * blk_mq_init_rq_map().
2001 		 */
2002 		kmemleak_free(page_address(page));
2003 		__free_pages(page, page->private);
2004 	}
2005 }
2006 
2007 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
2008 {
2009 	kfree(tags->rqs);
2010 	tags->rqs = NULL;
2011 	kfree(tags->static_rqs);
2012 	tags->static_rqs = NULL;
2013 
2014 	blk_mq_free_tags(tags);
2015 }
2016 
2017 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2018 					unsigned int hctx_idx,
2019 					unsigned int nr_tags,
2020 					unsigned int reserved_tags)
2021 {
2022 	struct blk_mq_tags *tags;
2023 	int node;
2024 
2025 	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
2026 	if (node == NUMA_NO_NODE)
2027 		node = set->numa_node;
2028 
2029 	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
2030 				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
2031 	if (!tags)
2032 		return NULL;
2033 
2034 	tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
2035 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2036 				 node);
2037 	if (!tags->rqs) {
2038 		blk_mq_free_tags(tags);
2039 		return NULL;
2040 	}
2041 
2042 	tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
2043 				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
2044 				 node);
2045 	if (!tags->static_rqs) {
2046 		kfree(tags->rqs);
2047 		blk_mq_free_tags(tags);
2048 		return NULL;
2049 	}
2050 
2051 	return tags;
2052 }
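
/*
 * Note on the two arrays set up above (descriptive only):
 *
 *	tags->static_rqs[i]	the preallocated request structures, filled in
 *				by blk_mq_alloc_rqs() and indexed by the tag a
 *				request was allocated with
 *	tags->rqs[tag]		indexed by driver tag and pointing at whichever
 *				request currently owns that tag, which is what
 *				blk_mq_tag_to_rq() looks up, roughly:
 *
 *					return tags->rqs[tag];
 */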
2053 
2054 static size_t order_to_size(unsigned int order)
2055 {
2056 	return (size_t)PAGE_SIZE << order;
2057 }
2058 
2059 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2060 			       unsigned int hctx_idx, int node)
2061 {
2062 	int ret;
2063 
2064 	if (set->ops->init_request) {
2065 		ret = set->ops->init_request(set, rq, hctx_idx, node);
2066 		if (ret)
2067 			return ret;
2068 	}
2069 
2070 	seqcount_init(&rq->gstate_seq);
2071 	u64_stats_init(&rq->aborted_gstate_sync);
2072 	return 0;
2073 }
2074 
2075 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2076 		     unsigned int hctx_idx, unsigned int depth)
2077 {
2078 	unsigned int i, j, entries_per_page, max_order = 4;
2079 	size_t rq_size, left;
2080 	int node;
2081 
2082 	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
2083 	if (node == NUMA_NO_NODE)
2084 		node = set->numa_node;
2085 
2086 	INIT_LIST_HEAD(&tags->page_list);
2087 
2088 	/*
2089 	 * rq_size is the size of the request plus driver payload, rounded
2090 	 * to the cacheline size
2091 	 */
2092 	rq_size = round_up(sizeof(struct request) + set->cmd_size,
2093 				cache_line_size());
2094 	left = rq_size * depth;
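	/*
	 * Worked example with assumed numbers (illustrative only): if
	 * sizeof(struct request) + set->cmd_size were 448 bytes on a machine
	 * with 64-byte cache lines, then rq_size = round_up(448, 64) = 448,
	 * and a depth of 256 needs left = 448 * 256 = 114688 bytes.  The loop
	 * below carves this out of chunks of at most order 4 (64KB with 4KB
	 * pages), which fit 65536 / 448 = 146 requests each.
	 */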
2095 
2096 	for (i = 0; i < depth; ) {
2097 		int this_order = max_order;
2098 		struct page *page;
2099 		int to_do;
2100 		void *p;
2101 
2102 		while (this_order && left < order_to_size(this_order - 1))
2103 			this_order--;
2104 
2105 		do {
2106 			page = alloc_pages_node(node,
2107 				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
2108 				this_order);
2109 			if (page)
2110 				break;
2111 			if (!this_order--)
2112 				break;
2113 			if (order_to_size(this_order) < rq_size)
2114 				break;
2115 		} while (1);
2116 
2117 		if (!page)
2118 			goto fail;
2119 
2120 		page->private = this_order;
2121 		list_add_tail(&page->lru, &tags->page_list);
2122 
2123 		p = page_address(page);
2124 		/*
2125 		 * Allow kmemleak to scan these pages as they contain pointers
2126 		 * to additional allocations, e.g. via ops->init_request().
2127 		 */
2128 		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
2129 		entries_per_page = order_to_size(this_order) / rq_size;
2130 		to_do = min(entries_per_page, depth - i);
2131 		left -= to_do * rq_size;
2132 		for (j = 0; j < to_do; j++) {
2133 			struct request *rq = p;
2134 
2135 			tags->static_rqs[i] = rq;
2136 			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2137 				tags->static_rqs[i] = NULL;
2138 				goto fail;
2139 			}
2140 
2141 			p += rq_size;
2142 			i++;
2143 		}
2144 	}
2145 	return 0;
2146 
2147 fail:
2148 	blk_mq_free_rqs(set, tags, hctx_idx);
2149 	return -ENOMEM;
2150 }
2151 
2152 /*
2153  * 'cpu' is going away. Splice any existing rq_list entries from this
2154  * software queue to the hw queue dispatch list, and ensure that it
2155  * gets run.
2156  */
2157 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
2158 {
2159 	struct blk_mq_hw_ctx *hctx;
2160 	struct blk_mq_ctx *ctx;
2161 	LIST_HEAD(tmp);
2162 
2163 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
2164 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
2165 
2166 	spin_lock(&ctx->lock);
2167 	if (!list_empty(&ctx->rq_list)) {
2168 		list_splice_init(&ctx->rq_list, &tmp);
2169 		blk_mq_hctx_clear_pending(hctx, ctx);
2170 	}
2171 	spin_unlock(&ctx->lock);
2172 
2173 	if (list_empty(&tmp))
2174 		return 0;
2175 
2176 	spin_lock(&hctx->lock);
2177 	list_splice_tail_init(&tmp, &hctx->dispatch);
2178 	spin_unlock(&hctx->lock);
2179 
2180 	blk_mq_run_hw_queue(hctx, true);
2181 	return 0;
2182 }
2183 
2184 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
2185 {
2186 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2187 					    &hctx->cpuhp_dead);
2188 }
2189 
2190 /* hctx->ctxs will be freed in queue's release handler */
2191 static void blk_mq_exit_hctx(struct request_queue *q,
2192 		struct blk_mq_tag_set *set,
2193 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2194 {
2195 	blk_mq_debugfs_unregister_hctx(hctx);
2196 
2197 	if (blk_mq_hw_queue_mapped(hctx))
2198 		blk_mq_tag_idle(hctx);
2199 
2200 	if (set->ops->exit_request)
2201 		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2202 
2203 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2204 
2205 	if (set->ops->exit_hctx)
2206 		set->ops->exit_hctx(hctx, hctx_idx);
2207 
2208 	if (hctx->flags & BLK_MQ_F_BLOCKING)
2209 		cleanup_srcu_struct(hctx->srcu);
2210 
2211 	blk_mq_remove_cpuhp(hctx);
2212 	blk_free_flush_queue(hctx->fq);
2213 	sbitmap_free(&hctx->ctx_map);
2214 }
2215 
2216 static void blk_mq_exit_hw_queues(struct request_queue *q,
2217 		struct blk_mq_tag_set *set, int nr_queue)
2218 {
2219 	struct blk_mq_hw_ctx *hctx;
2220 	unsigned int i;
2221 
2222 	queue_for_each_hw_ctx(q, hctx, i) {
2223 		if (i == nr_queue)
2224 			break;
2225 		blk_mq_exit_hctx(q, set, hctx, i);
2226 	}
2227 }
2228 
2229 static int blk_mq_init_hctx(struct request_queue *q,
2230 		struct blk_mq_tag_set *set,
2231 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2232 {
2233 	int node;
2234 
2235 	node = hctx->numa_node;
2236 	if (node == NUMA_NO_NODE)
2237 		node = hctx->numa_node = set->numa_node;
2238 
2239 	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
2240 	spin_lock_init(&hctx->lock);
2241 	INIT_LIST_HEAD(&hctx->dispatch);
2242 	hctx->queue = q;
2243 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2244 
2245 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2246 
2247 	hctx->tags = set->tags[hctx_idx];
2248 
2249 	/*
2250 	 * Allocate space for all possible cpus to avoid allocation at
2251 	 * runtime
2252 	 */
2253 	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
2254 					GFP_KERNEL, node);
2255 	if (!hctx->ctxs)
2256 		goto unregister_cpu_notifier;
2257 
2258 	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2259 			      node))
2260 		goto free_ctxs;
2261 
2262 	hctx->nr_ctx = 0;
2263 
2264 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2265 	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2266 
2267 	if (set->ops->init_hctx &&
2268 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2269 		goto free_bitmap;
2270 
2271 	if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2272 		goto exit_hctx;
2273 
2274 	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2275 	if (!hctx->fq)
2276 		goto sched_exit_hctx;
2277 
2278 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
2279 		goto free_fq;
2280 
2281 	if (hctx->flags & BLK_MQ_F_BLOCKING)
2282 		init_srcu_struct(hctx->srcu);
2283 
2284 	blk_mq_debugfs_register_hctx(q, hctx);
2285 
2286 	return 0;
2287 
2288  free_fq:
2289 	blk_free_flush_queue(hctx->fq);	/* also frees fq->flush_rq */
2290  sched_exit_hctx:
2291 	blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2292  exit_hctx:
2293 	if (set->ops->exit_hctx)
2294 		set->ops->exit_hctx(hctx, hctx_idx);
2295  free_bitmap:
2296 	sbitmap_free(&hctx->ctx_map);
2297  free_ctxs:
2298 	kfree(hctx->ctxs);
2299  unregister_cpu_notifier:
2300 	blk_mq_remove_cpuhp(hctx);
2301 	return -1;
2302 }
2303 
2304 static void blk_mq_init_cpu_queues(struct request_queue *q,
2305 				   unsigned int nr_hw_queues)
2306 {
2307 	unsigned int i;
2308 
2309 	for_each_possible_cpu(i) {
2310 		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2311 		struct blk_mq_hw_ctx *hctx;
2312 
2313 		__ctx->cpu = i;
2314 		spin_lock_init(&__ctx->lock);
2315 		INIT_LIST_HEAD(&__ctx->rq_list);
2316 		__ctx->queue = q;
2317 
2318 		/*
2319 		 * Set local node, IFF we have more than one hw queue. If
2320 		 * not, we remain on the home node of the device
2321 		 */
2322 		hctx = blk_mq_map_queue(q, i);
2323 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2324 			hctx->numa_node = local_memory_node(cpu_to_node(i));
2325 	}
2326 }
2327 
2328 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2329 {
2330 	int ret = 0;
2331 
2332 	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2333 					set->queue_depth, set->reserved_tags);
2334 	if (!set->tags[hctx_idx])
2335 		return false;
2336 
2337 	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2338 				set->queue_depth);
2339 	if (!ret)
2340 		return true;
2341 
2342 	blk_mq_free_rq_map(set->tags[hctx_idx]);
2343 	set->tags[hctx_idx] = NULL;
2344 	return false;
2345 }
2346 
2347 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2348 					 unsigned int hctx_idx)
2349 {
2350 	if (set->tags[hctx_idx]) {
2351 		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2352 		blk_mq_free_rq_map(set->tags[hctx_idx]);
2353 		set->tags[hctx_idx] = NULL;
2354 	}
2355 }
2356 
2357 static void blk_mq_map_swqueue(struct request_queue *q)
2358 {
2359 	unsigned int i, hctx_idx;
2360 	struct blk_mq_hw_ctx *hctx;
2361 	struct blk_mq_ctx *ctx;
2362 	struct blk_mq_tag_set *set = q->tag_set;
2363 
2364 	/*
2365 	 * Avoid others reading an incomplete hctx->cpumask through sysfs
2366 	 */
2367 	mutex_lock(&q->sysfs_lock);
2368 
2369 	queue_for_each_hw_ctx(q, hctx, i) {
2370 		cpumask_clear(hctx->cpumask);
2371 		hctx->nr_ctx = 0;
2372 	}
2373 
2374 	/*
2375 	 * Map software to hardware queues.
2376 	 *
2377 	 * If the cpu isn't present, the cpu is mapped to first hctx.
2378 	 * If the cpu isn't present, the cpu is mapped to the first hctx.
2379 	for_each_possible_cpu(i) {
2380 		hctx_idx = q->mq_map[i];
2381 		/* an unmapped hw queue can be remapped after the CPU topology changes */
2382 		if (!set->tags[hctx_idx] &&
2383 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2384 			/*
2385 			 * If tags initialization fails for some hctx,
2386 			 * that hctx won't be brought online.  In this
2387 			 * case, remap the current ctx to hctx[0], which
2388 			 * is guaranteed to always have tags allocated.
2389 			 */
2390 			q->mq_map[i] = 0;
2391 		}
2392 
2393 		ctx = per_cpu_ptr(q->queue_ctx, i);
2394 		hctx = blk_mq_map_queue(q, i);
2395 
2396 		cpumask_set_cpu(i, hctx->cpumask);
2397 		ctx->index_hw = hctx->nr_ctx;
2398 		hctx->ctxs[hctx->nr_ctx++] = ctx;
2399 	}
2400 
2401 	mutex_unlock(&q->sysfs_lock);
2402 
2403 	queue_for_each_hw_ctx(q, hctx, i) {
2404 		/*
2405 		 * If no software queues are mapped to this hardware queue,
2406 		 * disable it and free the request entries.
2407 		 */
2408 		if (!hctx->nr_ctx) {
2409 			/* Never unmap queue 0.  We need it as a
2410 			 * fallback in case a new remap fails its
2411 			 * allocation.
2412 			 */
2413 			if (i && set->tags[i])
2414 				blk_mq_free_map_and_requests(set, i);
2415 
2416 			hctx->tags = NULL;
2417 			continue;
2418 		}
2419 
2420 		hctx->tags = set->tags[i];
2421 		WARN_ON(!hctx->tags);
2422 
2423 		/*
2424 		 * Set the map size to the number of mapped software queues.
2425 		 * This is more accurate and more efficient than looping
2426 		 * over all possibly mapped software queues.
2427 		 */
2428 		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2429 
2430 		/*
2431 		 * Initialize batch roundrobin counts
2432 		 */
2433 		hctx->next_cpu = cpumask_first_and(hctx->cpumask,
2434 				cpu_online_mask);
2435 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2436 	}
2437 }
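
/*
 * Illustrative example of the mapping built above (an assumed topology, not
 * a requirement): with four possible CPUs and two hardware queues, a typical
 * mq_map of { 0, 0, 1, 1 } results in
 *
 *	hctx0: cpumask = 0-1, ctxs = { ctx0, ctx1 }, nr_ctx = 2
 *	hctx1: cpumask = 2-3, ctxs = { ctx2, ctx3 }, nr_ctx = 2
 *
 * ctx->index_hw is the position of a ctx within its hctx->ctxs[] array, and
 * is the bit that blk_mq_hctx_mark_pending() sets in hctx->ctx_map.
 */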
2438 
2439 /*
2440  * Caller needs to ensure that we're either frozen/quiesced, or that
2441  * the queue isn't live yet.
2442  */
2443 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2444 {
2445 	struct blk_mq_hw_ctx *hctx;
2446 	int i;
2447 
2448 	queue_for_each_hw_ctx(q, hctx, i) {
2449 		if (shared) {
2450 			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2451 				atomic_inc(&q->shared_hctx_restart);
2452 			hctx->flags |= BLK_MQ_F_TAG_SHARED;
2453 		} else {
2454 			if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2455 				atomic_dec(&q->shared_hctx_restart);
2456 			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2457 		}
2458 	}
2459 }
2460 
2461 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2462 					bool shared)
2463 {
2464 	struct request_queue *q;
2465 
2466 	lockdep_assert_held(&set->tag_list_lock);
2467 
2468 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2469 		blk_mq_freeze_queue(q);
2470 		queue_set_hctx_shared(q, shared);
2471 		blk_mq_unfreeze_queue(q);
2472 	}
2473 }
2474 
2475 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2476 {
2477 	struct blk_mq_tag_set *set = q->tag_set;
2478 
2479 	mutex_lock(&set->tag_list_lock);
2480 	list_del_rcu(&q->tag_set_list);
2481 	INIT_LIST_HEAD(&q->tag_set_list);
2482 	if (list_is_singular(&set->tag_list)) {
2483 		/* just transitioned to unshared */
2484 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
2485 		/* update existing queue */
2486 		blk_mq_update_tag_set_depth(set, false);
2487 	}
2488 	mutex_unlock(&set->tag_list_lock);
2489 
2490 	synchronize_rcu();
2491 }
2492 
2493 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2494 				     struct request_queue *q)
2495 {
2496 	q->tag_set = set;
2497 
2498 	mutex_lock(&set->tag_list_lock);
2499 
2500 	/*
2501 	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
2502 	 */
2503 	if (!list_empty(&set->tag_list) &&
2504 	    !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2505 		set->flags |= BLK_MQ_F_TAG_SHARED;
2506 		/* update existing queue */
2507 		blk_mq_update_tag_set_depth(set, true);
2508 	}
2509 	if (set->flags & BLK_MQ_F_TAG_SHARED)
2510 		queue_set_hctx_shared(q, true);
2511 	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2512 
2513 	mutex_unlock(&set->tag_list_lock);
2514 }
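
/*
 * Example of the shared-tag transition handled above (illustrative): a
 * driver that registers several request queues against one tag_set, say a
 * controller exposing multiple namespaces or LUNs, keeps the set unshared
 * while only one queue is registered.  Adding a second queue sets
 * BLK_MQ_F_TAG_SHARED on the set and, through blk_mq_update_tag_set_depth(),
 * freezes every registered queue and marks its hctxs shared, so that tag
 * allocation starts accounting for all active users of the set.
 */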
2515 
2516 /*
2517  * This is the actual release handler for mq, but we do it from the
2518  * request queue's release handler to avoid use-after-free
2519  * and headaches: q->mq_kobj shouldn't have been introduced,
2520  * but we can't group the ctx/hctx kobjects without it.
2521  */
2522 void blk_mq_release(struct request_queue *q)
2523 {
2524 	struct blk_mq_hw_ctx *hctx;
2525 	unsigned int i;
2526 
2527 	/* hctx kobj stays in hctx */
2528 	queue_for_each_hw_ctx(q, hctx, i) {
2529 		if (!hctx)
2530 			continue;
2531 		kobject_put(&hctx->kobj);
2532 	}
2533 
2534 	q->mq_map = NULL;
2535 
2536 	kfree(q->queue_hw_ctx);
2537 
2538 	/*
2539 	 * Release .mq_kobj and the sw queues' kobjects now because
2540 	 * both share their lifetime with the request queue.
2541 	 */
2542 	blk_mq_sysfs_deinit(q);
2543 
2544 	free_percpu(q->queue_ctx);
2545 }
2546 
2547 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2548 {
2549 	struct request_queue *uninit_q, *q;
2550 
2551 	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
2552 	if (!uninit_q)
2553 		return ERR_PTR(-ENOMEM);
2554 
2555 	q = blk_mq_init_allocated_queue(set, uninit_q);
2556 	if (IS_ERR(q))
2557 		blk_cleanup_queue(uninit_q);
2558 
2559 	return q;
2560 }
2561 EXPORT_SYMBOL(blk_mq_init_queue);
2562 
2563 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2564 {
2565 	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2566 
2567 	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
2568 			   __alignof__(struct blk_mq_hw_ctx)) !=
2569 		     sizeof(struct blk_mq_hw_ctx));
2570 
2571 	if (tag_set->flags & BLK_MQ_F_BLOCKING)
2572 		hw_ctx_size += sizeof(struct srcu_struct);
2573 
2574 	return hw_ctx_size;
2575 }
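
/*
 * Put differently (a note, not new behaviour): the BUILD_BUG_ON above checks
 * that struct blk_mq_hw_ctx ends with its srcu member, so for
 * BLK_MQ_F_BLOCKING drivers a full srcu_struct is simply appended to each
 * hctx allocation.  hctx_lock()/hctx_unlock() then take
 * srcu_read_lock(hctx->srcu) instead of rcu_read_lock(), which is what lets
 * ->queue_rq() sleep for such drivers.
 */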
2576 
2577 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2578 						struct request_queue *q)
2579 {
2580 	int i, j;
2581 	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2582 
2583 	blk_mq_sysfs_unregister(q);
2584 
2585 	/* protect against switching the io scheduler */
2586 	mutex_lock(&q->sysfs_lock);
2587 	for (i = 0; i < set->nr_hw_queues; i++) {
2588 		int node;
2589 
2590 		if (hctxs[i])
2591 			continue;
2592 
2593 		node = blk_mq_hw_queue_to_node(q->mq_map, i);
2594 		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
2595 					GFP_KERNEL, node);
2596 		if (!hctxs[i])
2597 			break;
2598 
2599 		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2600 						node)) {
2601 			kfree(hctxs[i]);
2602 			hctxs[i] = NULL;
2603 			break;
2604 		}
2605 
2606 		atomic_set(&hctxs[i]->nr_active, 0);
2607 		hctxs[i]->numa_node = node;
2608 		hctxs[i]->queue_num = i;
2609 
2610 		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2611 			free_cpumask_var(hctxs[i]->cpumask);
2612 			kfree(hctxs[i]);
2613 			hctxs[i] = NULL;
2614 			break;
2615 		}
2616 		blk_mq_hctx_kobj_init(hctxs[i]);
2617 	}
2618 	for (j = i; j < q->nr_hw_queues; j++) {
2619 		struct blk_mq_hw_ctx *hctx = hctxs[j];
2620 
2621 		if (hctx) {
2622 			if (hctx->tags)
2623 				blk_mq_free_map_and_requests(set, j);
2624 			blk_mq_exit_hctx(q, set, hctx, j);
2625 			kobject_put(&hctx->kobj);
2626 			hctxs[j] = NULL;
2627 
2628 		}
2629 	}
2630 	q->nr_hw_queues = i;
2631 	mutex_unlock(&q->sysfs_lock);
2632 	blk_mq_sysfs_register(q);
2633 }
2634 
2635 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2636 						  struct request_queue *q)
2637 {
2638 	/* mark the queue as mq asap */
2639 	q->mq_ops = set->ops;
2640 
2641 	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2642 					     blk_mq_poll_stats_bkt,
2643 					     BLK_MQ_POLL_STATS_BKTS, q);
2644 	if (!q->poll_cb)
2645 		goto err_exit;
2646 
2647 	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2648 	if (!q->queue_ctx)
2649 		goto err_exit;
2650 
2651 	/* init q->mq_kobj and sw queues' kobjects */
2652 	blk_mq_sysfs_init(q);
2653 
2654 	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2655 						GFP_KERNEL, set->numa_node);
2656 	if (!q->queue_hw_ctx)
2657 		goto err_percpu;
2658 
2659 	q->mq_map = set->mq_map;
2660 
2661 	blk_mq_realloc_hw_ctxs(set, q);
2662 	if (!q->nr_hw_queues)
2663 		goto err_hctxs;
2664 
2665 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2666 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2667 
2668 	q->nr_queues = nr_cpu_ids;
2669 
2670 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2671 
2672 	if (!(set->flags & BLK_MQ_F_SG_MERGE))
2673 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
2674 
2675 	q->sg_reserved_size = INT_MAX;
2676 
2677 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2678 	INIT_LIST_HEAD(&q->requeue_list);
2679 	spin_lock_init(&q->requeue_lock);
2680 
2681 	blk_queue_make_request(q, blk_mq_make_request);
2682 	if (q->mq_ops->poll)
2683 		q->poll_fn = blk_mq_poll;
2684 
2685 	/*
2686 	 * Do this after blk_queue_make_request() overrides it...
2687 	 */
2688 	q->nr_requests = set->queue_depth;
2689 
2690 	/*
2691 	 * Default to classic polling
2692 	 */
2693 	q->poll_nsec = -1;
2694 
2695 	if (set->ops->complete)
2696 		blk_queue_softirq_done(q, set->ops->complete);
2697 
2698 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2699 	blk_mq_add_queue_tag_set(set, q);
2700 	blk_mq_map_swqueue(q);
2701 
2702 	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2703 		int ret;
2704 
2705 		ret = blk_mq_sched_init(q);
2706 		if (ret)
2707 			return ERR_PTR(ret);
2708 	}
2709 
2710 	return q;
2711 
2712 err_hctxs:
2713 	kfree(q->queue_hw_ctx);
2714 err_percpu:
2715 	free_percpu(q->queue_ctx);
2716 err_exit:
2717 	q->mq_ops = NULL;
2718 	return ERR_PTR(-ENOMEM);
2719 }
2720 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2721 
2722 void blk_mq_free_queue(struct request_queue *q)
2723 {
2724 	struct blk_mq_tag_set	*set = q->tag_set;
2725 
2726 	blk_mq_del_queue_tag_set(q);
2727 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2728 }
2729 
2730 /* Basically redo blk_mq_init_queue with queue frozen */
2731 static void blk_mq_queue_reinit(struct request_queue *q)
2732 {
2733 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2734 
2735 	blk_mq_debugfs_unregister_hctxs(q);
2736 	blk_mq_sysfs_unregister(q);
2737 
2738 	/*
2739 	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2740 	 * we should change hctx numa_node according to the new topology (this
2741 	 * involves freeing and re-allocating memory, worth doing?)
2742 	 */
2743 	blk_mq_map_swqueue(q);
2744 
2745 	blk_mq_sysfs_register(q);
2746 	blk_mq_debugfs_register_hctxs(q);
2747 }
2748 
2749 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2750 {
2751 	int i;
2752 
2753 	for (i = 0; i < set->nr_hw_queues; i++)
2754 		if (!__blk_mq_alloc_rq_map(set, i))
2755 			goto out_unwind;
2756 
2757 	return 0;
2758 
2759 out_unwind:
2760 	while (--i >= 0)
2761 		blk_mq_free_rq_map(set->tags[i]);
2762 
2763 	return -ENOMEM;
2764 }
2765 
2766 /*
2767  * Allocate the request maps associated with this tag_set. Note that this
2768  * may reduce the depth asked for, if memory is tight. set->queue_depth
2769  * will be updated to reflect the allocated depth.
2770  */
2771 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2772 {
2773 	unsigned int depth;
2774 	int err;
2775 
2776 	depth = set->queue_depth;
2777 	do {
2778 		err = __blk_mq_alloc_rq_maps(set);
2779 		if (!err)
2780 			break;
2781 
2782 		set->queue_depth >>= 1;
2783 		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2784 			err = -ENOMEM;
2785 			break;
2786 		}
2787 	} while (set->queue_depth);
2788 
2789 	if (!set->queue_depth || err) {
2790 		pr_err("blk-mq: failed to allocate request map\n");
2791 		return -ENOMEM;
2792 	}
2793 
2794 	if (depth != set->queue_depth)
2795 		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2796 						depth, set->queue_depth);
2797 
2798 	return 0;
2799 }
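
/*
 * For example (illustrative numbers): a requested queue_depth of 1024 that
 * cannot be allocated is retried at 512, then 256, and so on.  The loop gives
 * up once the depth would drop below set->reserved_tags + BLK_MQ_TAG_MIN, and
 * on success the caller finds the final depth in set->queue_depth.
 */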
2800 
2801 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2802 {
2803 	if (set->ops->map_queues) {
2804 		int cpu;
2805 		/*
2806 		 * A transport's .map_queues callback is usually implemented
2807 		 * in the following way:
2808 		 *
2809 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
2810 		 * 	mask = get_cpu_mask(queue)
2811 		 * 	for_each_cpu(cpu, mask)
2812 		 * 		set->mq_map[cpu] = queue;
2813 		 * }
2814 		 *
2815 		 * When we need to remap, the table has to be cleared first
2816 		 * to kill stale mappings, since a CPU may end up not mapped
2817 		 * to any hw queue.
2818 		 */
2819 		for_each_possible_cpu(cpu)
2820 			set->mq_map[cpu] = 0;
2821 
2822 		return set->ops->map_queues(set);
2823 	} else
2824 		return blk_mq_map_queues(set);
2825 }
2826 
2827 /*
2828  * Alloc a tag set to be associated with one or more request queues.
2829  * May fail with EINVAL for various error conditions. May adjust the
2830  * requested depth down, if it is too large. In that case, the adjusted
2831  * value will be stored in set->queue_depth.
2832  */
2833 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2834 {
2835 	int ret;
2836 
2837 	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2838 
2839 	if (!set->nr_hw_queues)
2840 		return -EINVAL;
2841 	if (!set->queue_depth)
2842 		return -EINVAL;
2843 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2844 		return -EINVAL;
2845 
2846 	if (!set->ops->queue_rq)
2847 		return -EINVAL;
2848 
2849 	if (!set->ops->get_budget ^ !set->ops->put_budget)
2850 		return -EINVAL;
2851 
2852 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2853 		pr_info("blk-mq: reduced tag depth to %u\n",
2854 			BLK_MQ_MAX_DEPTH);
2855 		set->queue_depth = BLK_MQ_MAX_DEPTH;
2856 	}
2857 
2858 	/*
2859 	 * If a crashdump is active, then we are potentially in a very
2860 	 * memory constrained environment. Limit us to 1 queue and
2861 	 * 64 tags to prevent using too much memory.
2862 	 */
2863 	if (is_kdump_kernel()) {
2864 		set->nr_hw_queues = 1;
2865 		set->queue_depth = min(64U, set->queue_depth);
2866 	}
2867 	/*
2868 	 * There is no use for more h/w queues than cpus.
2869 	 */
2870 	if (set->nr_hw_queues > nr_cpu_ids)
2871 		set->nr_hw_queues = nr_cpu_ids;
2872 
2873 	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2874 				 GFP_KERNEL, set->numa_node);
2875 	if (!set->tags)
2876 		return -ENOMEM;
2877 
2878 	ret = -ENOMEM;
2879 	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2880 			GFP_KERNEL, set->numa_node);
2881 	if (!set->mq_map)
2882 		goto out_free_tags;
2883 
2884 	ret = blk_mq_update_queue_map(set);
2885 	if (ret)
2886 		goto out_free_mq_map;
2887 
2888 	ret = blk_mq_alloc_rq_maps(set);
2889 	if (ret)
2890 		goto out_free_mq_map;
2891 
2892 	mutex_init(&set->tag_list_lock);
2893 	INIT_LIST_HEAD(&set->tag_list);
2894 
2895 	return 0;
2896 
2897 out_free_mq_map:
2898 	kfree(set->mq_map);
2899 	set->mq_map = NULL;
2900 out_free_tags:
2901 	kfree(set->tags);
2902 	set->tags = NULL;
2903 	return ret;
2904 }
2905 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
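
/*
 * Minimal usage sketch for a driver (illustrative only; my_mq_ops,
 * MY_QUEUE_DEPTH and MY_CMD_SIZE are hypothetical):
 *
 *	struct blk_mq_tag_set set = {
 *		.ops		= &my_mq_ops,	// must at least provide .queue_rq
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= MY_QUEUE_DEPTH,
 *		.cmd_size	= MY_CMD_SIZE,	// per-request driver payload
 *		.numa_node	= NUMA_NO_NODE,
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 *	struct request_queue *q;
 *
 *	if (blk_mq_alloc_tag_set(&set))
 *		goto fail;
 *	q = blk_mq_init_queue(&set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&set);
 *		goto fail;
 *	}
 *	...
 *	blk_cleanup_queue(q);
 *	blk_mq_free_tag_set(&set);
 */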
2906 
2907 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2908 {
2909 	int i;
2910 
2911 	for (i = 0; i < nr_cpu_ids; i++)
2912 		blk_mq_free_map_and_requests(set, i);
2913 
2914 	kfree(set->mq_map);
2915 	set->mq_map = NULL;
2916 
2917 	kfree(set->tags);
2918 	set->tags = NULL;
2919 }
2920 EXPORT_SYMBOL(blk_mq_free_tag_set);
2921 
2922 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2923 {
2924 	struct blk_mq_tag_set *set = q->tag_set;
2925 	struct blk_mq_hw_ctx *hctx;
2926 	int i, ret;
2927 
2928 	if (!set)
2929 		return -EINVAL;
2930 
2931 	blk_mq_freeze_queue(q);
2932 	blk_mq_quiesce_queue(q);
2933 
2934 	ret = 0;
2935 	queue_for_each_hw_ctx(q, hctx, i) {
2936 		if (!hctx->tags)
2937 			continue;
2938 		/*
2939 		 * If we're using an MQ scheduler, just update the scheduler
2940 		 * queue depth. This is similar to what the old code would do.
2941 		 */
2942 		if (!hctx->sched_tags) {
2943 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
2944 							false);
2945 		} else {
2946 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2947 							nr, true);
2948 		}
2949 		if (ret)
2950 			break;
2951 	}
2952 
2953 	if (!ret)
2954 		q->nr_requests = nr;
2955 
2956 	blk_mq_unquiesce_queue(q);
2957 	blk_mq_unfreeze_queue(q);
2958 
2959 	return ret;
2960 }
2961 
2962 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2963 							int nr_hw_queues)
2964 {
2965 	struct request_queue *q;
2966 
2967 	lockdep_assert_held(&set->tag_list_lock);
2968 
2969 	if (nr_hw_queues > nr_cpu_ids)
2970 		nr_hw_queues = nr_cpu_ids;
2971 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2972 		return;
2973 
2974 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2975 		blk_mq_freeze_queue(q);
2976 
2977 	set->nr_hw_queues = nr_hw_queues;
2978 	blk_mq_update_queue_map(set);
2979 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
2980 		blk_mq_realloc_hw_ctxs(set, q);
2981 		blk_mq_queue_reinit(q);
2982 	}
2983 
2984 	list_for_each_entry(q, &set->tag_list, tag_set_list)
2985 		blk_mq_unfreeze_queue(q);
2986 }
2987 
2988 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2989 {
2990 	mutex_lock(&set->tag_list_lock);
2991 	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2992 	mutex_unlock(&set->tag_list_lock);
2993 }
2994 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2995 
2996 /* Enable polling stats and return whether they were already enabled. */
2997 static bool blk_poll_stats_enable(struct request_queue *q)
2998 {
2999 	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3000 	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
3001 		return true;
3002 	blk_stat_add_callback(q, q->poll_cb);
3003 	return false;
3004 }
3005 
3006 static void blk_mq_poll_stats_start(struct request_queue *q)
3007 {
3008 	/*
3009 	 * We don't arm the callback if polling stats are not enabled or the
3010 	 * callback is already active.
3011 	 */
3012 	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3013 	    blk_stat_is_active(q->poll_cb))
3014 		return;
3015 
3016 	blk_stat_activate_msecs(q->poll_cb, 100);
3017 }
3018 
3019 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3020 {
3021 	struct request_queue *q = cb->data;
3022 	int bucket;
3023 
3024 	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3025 		if (cb->stat[bucket].nr_samples)
3026 			q->poll_stat[bucket] = cb->stat[bucket];
3027 	}
3028 }
3029 
3030 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3031 				       struct blk_mq_hw_ctx *hctx,
3032 				       struct request *rq)
3033 {
3034 	unsigned long ret = 0;
3035 	int bucket;
3036 
3037 	/*
3038 	 * If stats collection isn't on, don't sleep but turn it on for
3039 	 * future users
3040 	 */
3041 	if (!blk_poll_stats_enable(q))
3042 		return 0;
3043 
3044 	/*
3045 	 * As an optimistic guess, use half of the mean service time
3046 	 * for this type of request. We can (and should) make this smarter.
3047 	 * For instance, if the completion latencies are tight, we can
3048 	 * get closer than just half the mean. This is especially
3049 	 * important on devices where the completion latencies are longer
3050 	 * than ~10 usec. We do use the stats for the relevant IO size
3051 	 * if available which does lead to better estimates.
3052 	 */
3053 	bucket = blk_mq_poll_stats_bkt(rq);
3054 	if (bucket < 0)
3055 		return ret;
3056 
3057 	if (q->poll_stat[bucket].nr_samples)
3058 		ret = (q->poll_stat[bucket].mean + 1) / 2;
3059 
3060 	return ret;
3061 }
3062 
3063 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
3064 				     struct blk_mq_hw_ctx *hctx,
3065 				     struct request *rq)
3066 {
3067 	struct hrtimer_sleeper hs;
3068 	enum hrtimer_mode mode;
3069 	unsigned int nsecs;
3070 	ktime_t kt;
3071 
3072 	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
3073 		return false;
3074 
3075 	/*
3076 	 * poll_nsec can be:
3077 	 *
3078 	 * -1:	don't ever hybrid sleep
3079 	 *  0:	use half of prev avg
3080 	 * >0:	use this specific value
3081 	 */
3082 	if (q->poll_nsec == -1)
3083 		return false;
3084 	else if (q->poll_nsec > 0)
3085 		nsecs = q->poll_nsec;
3086 	else
3087 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
3088 
3089 	if (!nsecs)
3090 		return false;
3091 
3092 	rq->rq_flags |= RQF_MQ_POLL_SLEPT;
3093 
3094 	/*
3095 	 * This will be replaced with the stats tracking code, using
3096 	 * 'avg_completion_time / 2' as the pre-sleep target.
3097 	 */
3098 	kt = nsecs;
3099 
3100 	mode = HRTIMER_MODE_REL;
3101 	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
3102 	hrtimer_set_expires(&hs.timer, kt);
3103 
3104 	hrtimer_init_sleeper(&hs, current);
3105 	do {
3106 		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
3107 			break;
3108 		set_current_state(TASK_UNINTERRUPTIBLE);
3109 		hrtimer_start_expires(&hs.timer, mode);
3110 		if (hs.task)
3111 			io_schedule();
3112 		hrtimer_cancel(&hs.timer);
3113 		mode = HRTIMER_MODE_ABS;
3114 	} while (hs.task && !signal_pending(current));
3115 
3116 	__set_current_state(TASK_RUNNING);
3117 	destroy_hrtimer_on_stack(&hs.timer);
3118 	return true;
3119 }
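
/*
 * Note on q->poll_nsec (descriptive only): it is normally tuned from user
 * space through the queue's io_poll_delay sysfs attribute, using the same
 * convention as the comment in blk_mq_poll_hybrid_sleep() above:
 *
 *	-1	never hybrid sleep, classic polling only (the default set in
 *		blk_mq_init_allocated_queue())
 *	 0	adaptive: sleep for about half of the mean completion time of
 *		similar requests, as estimated by blk_mq_poll_nsecs()
 *	>0	sleep for exactly that many nanoseconds before polling
 */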
3120 
3121 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
3122 {
3123 	struct request_queue *q = hctx->queue;
3124 	long state;
3125 
3126 	/*
3127 	 * If we sleep, have the caller restart the poll loop to reset
3128 	 * the state. Like for the other success return cases, the
3129 	 * caller is responsible for checking if the IO completed. If
3130 	 * the IO isn't complete, we'll get called again and will go
3131 	 * straight to the busy poll loop.
3132 	 */
3133 	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
3134 		return true;
3135 
3136 	hctx->poll_considered++;
3137 
3138 	state = current->state;
3139 	while (!need_resched()) {
3140 		int ret;
3141 
3142 		hctx->poll_invoked++;
3143 
3144 		ret = q->mq_ops->poll(hctx, rq->tag);
3145 		if (ret > 0) {
3146 			hctx->poll_success++;
3147 			set_current_state(TASK_RUNNING);
3148 			return true;
3149 		}
3150 
3151 		if (signal_pending_state(state, current))
3152 			set_current_state(TASK_RUNNING);
3153 
3154 		if (current->state == TASK_RUNNING)
3155 			return true;
3156 		if (ret < 0)
3157 			break;
3158 		cpu_relax();
3159 	}
3160 
3161 	__set_current_state(TASK_RUNNING);
3162 	return false;
3163 }
3164 
3165 static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
3166 {
3167 	struct blk_mq_hw_ctx *hctx;
3168 	struct request *rq;
3169 
3170 	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
3171 		return false;
3172 
3173 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
3174 	if (!blk_qc_t_is_internal(cookie))
3175 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3176 	else {
3177 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3178 		/*
3179 		 * With scheduling, if the request has completed, we'll
3180 		 * get a NULL return here, as we clear the sched tag when
3181 		 * that happens. The request still remains valid, like always,
3182 		 * so we should be safe with just the NULL check.
3183 		 */
3184 		if (!rq)
3185 			return false;
3186 	}
3187 
3188 	return __blk_mq_poll(hctx, rq);
3189 }
3190 
3191 static int __init blk_mq_init(void)
3192 {
3193 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3194 				blk_mq_hctx_notify_dead);
3195 	return 0;
3196 }
3197 subsys_initcall(blk_mq_init);
3198