xref: /linux/drivers/md/dm-rq.c (revision 7aacf86b75bc5523d20fd9127104384fce51ce9c)
1 /*
2  * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
3  *
4  * This file is released under the GPL.
5  */
6 
7 #include "dm-core.h"
8 #include "dm-rq.h"
9 
10 #include <linux/elevator.h> /* for rq_end_sector() */
11 #include <linux/blk-mq.h>
12 
13 #define DM_MSG_PREFIX "core-rq"
14 
15 #define DM_MQ_NR_HW_QUEUES 1
16 #define DM_MQ_QUEUE_DEPTH 2048
17 static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
18 static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
19 
20 /*
21  * The number of IOs reserved in request-based DM's mempools, settable by the user.
22  */
23 #define RESERVED_REQUEST_BASED_IOS	256
24 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
25 
26 static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
27 
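/*
 * Whether request-based DM uses blk-mq: the system-wide default comes from
 * CONFIG_DM_MQ_DEFAULT (overridable via the use_blk_mq module parameter);
 * the per-device decision is cached in md->use_blk_mq.
 */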
28 bool dm_use_blk_mq_default(void)
29 {
30 	return use_blk_mq;
31 }
32 
33 bool dm_use_blk_mq(struct mapped_device *md)
34 {
35 	return md->use_blk_mq;
36 }
37 EXPORT_SYMBOL_GPL(dm_use_blk_mq);
38 
39 unsigned dm_get_reserved_rq_based_ios(void)
40 {
41 	return __dm_get_module_param(&reserved_rq_based_ios,
42 				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
43 }
44 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
45 
46 static unsigned dm_get_blk_mq_nr_hw_queues(void)
47 {
48 	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
49 }
50 
51 static unsigned dm_get_blk_mq_queue_depth(void)
52 {
53 	return __dm_get_module_param(&dm_mq_queue_depth,
54 				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
55 }
56 
57 int dm_request_based(struct mapped_device *md)
58 {
59 	return blk_queue_stackable(md->queue);
60 }
61 
62 static void dm_old_start_queue(struct request_queue *q)
63 {
64 	unsigned long flags;
65 
66 	spin_lock_irqsave(q->queue_lock, flags);
67 	if (blk_queue_stopped(q))
68 		blk_start_queue(q);
69 	spin_unlock_irqrestore(q->queue_lock, flags);
70 }
71 
72 static void dm_mq_start_queue(struct request_queue *q)
73 {
74 	blk_mq_unquiesce_queue(q);
75 	blk_mq_kick_requeue_list(q);
76 }
77 
78 void dm_start_queue(struct request_queue *q)
79 {
80 	if (!q->mq_ops)
81 		dm_old_start_queue(q);
82 	else
83 		dm_mq_start_queue(q);
84 }
85 
86 static void dm_old_stop_queue(struct request_queue *q)
87 {
88 	unsigned long flags;
89 
90 	spin_lock_irqsave(q->queue_lock, flags);
91 	if (!blk_queue_stopped(q))
92 		blk_stop_queue(q);
93 	spin_unlock_irqrestore(q->queue_lock, flags);
94 }
95 
96 static void dm_mq_stop_queue(struct request_queue *q)
97 {
98 	if (blk_mq_queue_stopped(q))
99 		return;
100 
101 	blk_mq_quiesce_queue(q);
102 }
103 
104 void dm_stop_queue(struct request_queue *q)
105 {
106 	if (!q->mq_ops)
107 		dm_old_stop_queue(q);
108 	else
109 		dm_mq_stop_queue(q);
110 }
111 
112 /*
113  * Partial completion handling for request-based dm
114  */
115 static void end_clone_bio(struct bio *clone)
116 {
117 	struct dm_rq_clone_bio_info *info =
118 		container_of(clone, struct dm_rq_clone_bio_info, clone);
119 	struct dm_rq_target_io *tio = info->tio;
120 	struct bio *bio = info->orig;
121 	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
122 	blk_status_t error = clone->bi_status;
123 
124 	bio_put(clone);
125 
126 	if (tio->error)
127 		/*
128 		 * An error has already been detected on the request.
129 		 * Once an error has occurred, just let clone->end_io() handle
130 		 * the remainder.
131 		 */
132 		return;
133 	else if (error) {
134 		/*
135 		 * Don't notify the upper layer of the error yet.
136 		 * The error handling decision is made by the target driver
137 		 * when the request is completed.
138 		 */
139 		tio->error = error;
140 		return;
141 	}
142 
143 	/*
144 	 * I/O for the bio completed successfully.
145 	 * Notify the upper layer of the completion.
146 	 */
147 
148 	/*
149 	 * bios are processed from the head of the list.
150 	 * So the completing bio should always be rq->bio.
151 	 * If it's not, something is wrong.
152 	 */
153 	if (tio->orig->bio != bio)
154 		DMERR("bio completion is going in the middle of the request");
155 
156 	/*
157 	 * Update the original request.
158 	 * Do not use blk_end_request() here, because it may complete
159 	 * the original request before the clone, and break the ordering.
160 	 */
161 	blk_update_request(tio->orig, BLK_STS_OK, nr_bytes);
162 }
163 
164 static struct dm_rq_target_io *tio_from_request(struct request *rq)
165 {
166 	return blk_mq_rq_to_pdu(rq);
167 }
168 
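/*
 * Turn the start stamp saved in tio->duration_jiffies into an elapsed time
 * and account the completed I/O in dm-stats, if statistics are enabled for
 * this mapped_device.
 */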
169 static void rq_end_stats(struct mapped_device *md, struct request *orig)
170 {
171 	if (unlikely(dm_stats_used(&md->stats))) {
172 		struct dm_rq_target_io *tio = tio_from_request(orig);
173 		tio->duration_jiffies = jiffies - tio->duration_jiffies;
174 		dm_stats_account_io(&md->stats, rq_data_dir(orig),
175 				    blk_rq_pos(orig), tio->n_sectors, true,
176 				    tio->duration_jiffies, &tio->stats_aux);
177 	}
178 }
179 
180 /*
181  * Don't touch any member of the md after calling this function because
182  * the md may be freed in dm_put() at the end of this function.
183  * Alternatively, call dm_get() before calling this function and dm_put() afterwards.
184  */
185 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
186 {
187 	struct request_queue *q = md->queue;
188 	unsigned long flags;
189 
190 	atomic_dec(&md->pending[rw]);
191 
192 	/* nudge anyone waiting on suspend queue */
193 	if (!md_in_flight(md))
194 		wake_up(&md->wait);
195 
196 	/*
197 	 * Run this off this callpath, as drivers could invoke end_io while
198 	 * inside their request_fn (and holding the queue lock). Calling
199 	 * back into ->request_fn() could deadlock attempting to grab the
200 	 * queue lock again.
201 	 */
202 	if (!q->mq_ops && run_queue) {
203 		spin_lock_irqsave(q->queue_lock, flags);
204 		blk_run_queue_async(q);
205 		spin_unlock_irqrestore(q->queue_lock, flags);
206 	}
207 
208 	/*
209 	 * dm_put() must be at the end of this function. See the comment above.
210 	 */
211 	dm_put(md);
212 }
213 
214 /*
215  * Complete the clone and the original request.
216  * Must be called without clone's queue lock held,
217  * see end_clone_request() for more details.
218  */
219 static void dm_end_request(struct request *clone, blk_status_t error)
220 {
221 	int rw = rq_data_dir(clone);
222 	struct dm_rq_target_io *tio = clone->end_io_data;
223 	struct mapped_device *md = tio->md;
224 	struct request *rq = tio->orig;
225 
226 	blk_rq_unprep_clone(clone);
227 	tio->ti->type->release_clone_rq(clone);
228 
229 	rq_end_stats(md, rq);
230 	if (!rq->q->mq_ops)
231 		blk_end_request_all(rq, error);
232 	else
233 		blk_mq_end_request(rq, error);
234 	rq_completed(md, rw, true);
235 }
236 
237 /*
238  * Requeue the original request of a clone.
239  */
240 static void dm_old_requeue_request(struct request *rq)
241 {
242 	struct request_queue *q = rq->q;
243 	unsigned long flags;
244 
245 	spin_lock_irqsave(q->queue_lock, flags);
246 	blk_requeue_request(q, rq);
247 	blk_run_queue_async(q);
248 	spin_unlock_irqrestore(q->queue_lock, flags);
249 }
250 
251 static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
252 {
253 	blk_mq_delay_kick_requeue_list(q, msecs);
254 }
255 
256 void dm_mq_kick_requeue_list(struct mapped_device *md)
257 {
258 	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
259 }
260 EXPORT_SYMBOL(dm_mq_kick_requeue_list);
261 
262 static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
263 {
264 	blk_mq_requeue_request(rq, false);
265 	__dm_mq_kick_requeue_list(rq->q, msecs);
266 }
267 
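/*
 * Requeue the original request: release the clone (if one was set up), put
 * the request back via the legacy or blk-mq requeue path (optionally after a
 * short delay), and drop the in-flight accounting taken in dm_start_request().
 */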
268 static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
269 {
270 	struct mapped_device *md = tio->md;
271 	struct request *rq = tio->orig;
272 	int rw = rq_data_dir(rq);
273 
274 	rq_end_stats(md, rq);
275 	if (tio->clone) {
276 		blk_rq_unprep_clone(tio->clone);
277 		tio->ti->type->release_clone_rq(tio->clone);
278 	}
279 
280 	if (!rq->q->mq_ops)
281 		dm_old_requeue_request(rq);
282 	else
283 		dm_mq_delay_requeue_request(rq, delay_requeue ? 100/*ms*/ : 0);
284 
285 	rq_completed(md, rw, false);
286 }
287 
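/*
 * Per-clone completion handling: give the target's rq_end_io() a chance to
 * inspect the result (only if the clone was actually mapped), disable
 * WRITE SAME / WRITE ZEROES for this device if the lower device turned out
 * not to support them, then finish, requeue or leave the request according
 * to the returned DM_ENDIO_* code.
 */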
288 static void dm_done(struct request *clone, blk_status_t error, bool mapped)
289 {
290 	int r = DM_ENDIO_DONE;
291 	struct dm_rq_target_io *tio = clone->end_io_data;
292 	dm_request_endio_fn rq_end_io = NULL;
293 
294 	if (tio->ti) {
295 		rq_end_io = tio->ti->type->rq_end_io;
296 
297 		if (mapped && rq_end_io)
298 			r = rq_end_io(tio->ti, clone, error, &tio->info);
299 	}
300 
301 	if (unlikely(error == BLK_STS_TARGET)) {
302 		if (req_op(clone) == REQ_OP_WRITE_SAME &&
303 		    !clone->q->limits.max_write_same_sectors)
304 			disable_write_same(tio->md);
305 		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
306 		    !clone->q->limits.max_write_zeroes_sectors)
307 			disable_write_zeroes(tio->md);
308 	}
309 
310 	switch (r) {
311 	case DM_ENDIO_DONE:
312 		/* The target wants to complete the I/O */
313 		dm_end_request(clone, error);
314 		break;
315 	case DM_ENDIO_INCOMPLETE:
316 		/* The target will handle the I/O */
317 		return;
318 	case DM_ENDIO_REQUEUE:
319 		/* The target wants to requeue the I/O */
320 		dm_requeue_original_request(tio, false);
321 		break;
322 	default:
323 		DMWARN("unimplemented target endio return value: %d", r);
324 		BUG();
325 	}
326 }
327 
328 /*
329  * Request completion handler for request-based dm
330  */
331 static void dm_softirq_done(struct request *rq)
332 {
333 	bool mapped = true;
334 	struct dm_rq_target_io *tio = tio_from_request(rq);
335 	struct request *clone = tio->clone;
336 	int rw;
337 
338 	if (!clone) {
339 		struct mapped_device *md = tio->md;
340 
341 		rq_end_stats(md, rq);
342 		rw = rq_data_dir(rq);
343 		if (!rq->q->mq_ops)
344 			blk_end_request_all(rq, tio->error);
345 		else
346 			blk_mq_end_request(rq, tio->error);
347 		rq_completed(md, rw, false);
348 		return;
349 	}
350 
351 	if (rq->rq_flags & RQF_FAILED)
352 		mapped = false;
353 
354 	dm_done(clone, tio->error, mapped);
355 }
356 
357 /*
358  * Complete the clone and the original request with the error status
359  * through softirq context.
360  */
361 static void dm_complete_request(struct request *rq, blk_status_t error)
362 {
363 	struct dm_rq_target_io *tio = tio_from_request(rq);
364 
365 	tio->error = error;
366 	if (!rq->q->mq_ops)
367 		blk_complete_request(rq);
368 	else
369 		blk_mq_complete_request(rq);
370 }
371 
372 /*
373  * Complete the unmapped clone and the original request with the error status
374  * through softirq context.
375  * The target's rq_end_io() function isn't called.
376  * This may be used when the target's clone_and_map_rq() function fails.
377  */
378 static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
379 {
380 	rq->rq_flags |= RQF_FAILED;
381 	dm_complete_request(rq, error);
382 }
383 
384 /*
385  * Called with the clone's queue lock held (in the case of .request_fn)
386  */
387 static void end_clone_request(struct request *clone, blk_status_t error)
388 {
389 	struct dm_rq_target_io *tio = clone->end_io_data;
390 
391 	/*
392 	 * Actual request completion is done in a softirq context which doesn't
393 	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
394 	 *     - during the completion, another request may be submitted by
395 	 *       the upper level driver of the stack
396 	 *     - that submission, which requires the queue lock, may be issued
397 	 *       against this clone's queue
398 	 */
399 	dm_complete_request(tio->orig, error);
400 }
401 
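/*
 * Hand the prepared clone to the underlying device's request queue.  If the
 * insert fails, the error is reported by completing the original request.
 */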
402 static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
403 {
404 	blk_status_t r;
405 
406 	if (blk_queue_io_stat(clone->q))
407 		clone->rq_flags |= RQF_IO_STAT;
408 
409 	clone->start_time = jiffies;
410 	r = blk_insert_cloned_request(clone->q, clone);
411 	if (r)
412 		/* must complete clone in terms of original request */
413 		dm_complete_request(rq, r);
414 }
415 
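/*
 * Bio "constructor" passed to blk_rq_prep_clone(): remember the original bio
 * and the owning tio, and route the clone bio's completion to end_clone_bio().
 */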
416 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
417 				 void *data)
418 {
419 	struct dm_rq_target_io *tio = data;
420 	struct dm_rq_clone_bio_info *info =
421 		container_of(bio, struct dm_rq_clone_bio_info, clone);
422 
423 	info->orig = bio_orig;
424 	info->tio = tio;
425 	bio->bi_end_io = end_clone_bio;
426 
427 	return 0;
428 }
429 
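/*
 * Prepare 'clone' by cloning the original request's bios from the
 * mapped_device's bioset, wire its completion to end_clone_request(),
 * and remember the clone in tio->clone.
 */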
430 static int setup_clone(struct request *clone, struct request *rq,
431 		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
432 {
433 	int r;
434 
435 	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
436 			      dm_rq_bio_constructor, tio);
437 	if (r)
438 		return r;
439 
440 	clone->end_io = end_clone_request;
441 	clone->end_io_data = tio;
442 
443 	tio->clone = clone;
444 
445 	return 0;
446 }
447 
448 static void map_tio_request(struct kthread_work *work);
449 
450 static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
451 		     struct mapped_device *md)
452 {
453 	tio->md = md;
454 	tio->ti = NULL;
455 	tio->clone = NULL;
456 	tio->orig = rq;
457 	tio->error = 0;
458 	/*
459 	 * Avoid initializing info for blk-mq; it passes
460 	 * target-specific data through info.ptr
461 	 * (see: dm_mq_init_request)
462 	 */
463 	if (!md->init_tio_pdu)
464 		memset(&tio->info, 0, sizeof(tio->info));
465 	if (md->kworker_task)
466 		kthread_init_work(&tio->work, map_tio_request);
467 }
468 
469 /*
470  * Returns:
471  * DM_MAPIO_*       : the request has been processed as indicated
472  * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
473  * DM_MAPIO_KILL    : the original request was completed with an error
474  */
475 static int map_request(struct dm_rq_target_io *tio)
476 {
477 	int r;
478 	struct dm_target *ti = tio->ti;
479 	struct mapped_device *md = tio->md;
480 	struct request *rq = tio->orig;
481 	struct request *clone = NULL;
482 
483 	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
484 	switch (r) {
485 	case DM_MAPIO_SUBMITTED:
486 		/* The target has taken the I/O and will submit it by itself later */
487 		break;
488 	case DM_MAPIO_REMAPPED:
489 		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
490 			/* -ENOMEM */
491 			ti->type->release_clone_rq(clone);
492 			return DM_MAPIO_REQUEUE;
493 		}
494 
495 		/* The target has remapped the I/O so dispatch it */
496 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
497 				     blk_rq_pos(rq));
498 		dm_dispatch_clone_request(clone, rq);
499 		break;
500 	case DM_MAPIO_REQUEUE:
501 		/* The target wants to requeue the I/O */
502 		break;
503 	case DM_MAPIO_DELAY_REQUEUE:
504 		/* The target wants to requeue the I/O after a delay */
505 		dm_requeue_original_request(tio, true);
506 		break;
507 	case DM_MAPIO_KILL:
508 		/* The target wants to complete the I/O */
509 		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
510 		break;
511 	default:
512 		DMWARN("unimplemented target map return value: %d", r);
513 		BUG();
514 	}
515 
516 	return r;
517 }
518 
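/*
 * Mark the original request as started, account it as in flight, record the
 * state used by the sequential-merge heuristic and dm-stats, and take the md
 * reference that rq_completed() will drop.
 */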
519 static void dm_start_request(struct mapped_device *md, struct request *orig)
520 {
521 	if (!orig->q->mq_ops)
522 		blk_start_request(orig);
523 	else
524 		blk_mq_start_request(orig);
525 	atomic_inc(&md->pending[rq_data_dir(orig)]);
526 
527 	if (md->seq_rq_merge_deadline_usecs) {
528 		md->last_rq_pos = rq_end_sector(orig);
529 		md->last_rq_rw = rq_data_dir(orig);
530 		md->last_rq_start_time = ktime_get();
531 	}
532 
533 	if (unlikely(dm_stats_used(&md->stats))) {
534 		struct dm_rq_target_io *tio = tio_from_request(orig);
535 		tio->duration_jiffies = jiffies;
536 		tio->n_sectors = blk_rq_sectors(orig);
537 		dm_stats_account_io(&md->stats, rq_data_dir(orig),
538 				    blk_rq_pos(orig), tio->n_sectors, false, 0,
539 				    &tio->stats_aux);
540 	}
541 
542 	/*
543 	 * Hold the md reference here for the in-flight I/O.
544 	 * We can't rely on the reference count taken by the device opener,
545 	 * because the device may be closed during request completion
546 	 * once all of its bios have completed.
547 	 * See the comment in rq_completed() too.
548 	 */
549 	dm_get(md);
550 }
551 
552 static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
553 {
554 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
555 
556 	/*
557 	 * Must initialize md member of tio, otherwise it won't
558 	 * be available in dm_mq_queue_rq.
559 	 */
560 	tio->md = md;
561 
562 	if (md->init_tio_pdu) {
563 		/* target-specific per-io data is immediately after the tio */
564 		tio->info.ptr = tio + 1;
565 	}
566 
567 	return 0;
568 }
569 
570 static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
571 {
572 	return __dm_rq_init_rq(q->rq_alloc_data, rq);
573 }
574 
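/*
 * kthread worker callback used by the old .request_fn path: map the request
 * outside the queue lock and requeue it if the target returns DM_MAPIO_REQUEUE.
 */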
575 static void map_tio_request(struct kthread_work *work)
576 {
577 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
578 
579 	if (map_request(tio) == DM_MAPIO_REQUEUE)
580 		dm_requeue_original_request(tio, false);
581 }
582 
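/*
 * sysfs show/store handlers (hooked up outside this file, in DM's sysfs code)
 * for the sequential I/O merge deadline used by dm_old_request_fn().  The
 * value is in microseconds and is capped at MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
 * the store silently ignores writes unless the device is old-style
 * request-based (DM_TYPE_REQUEST_BASED).
 */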
583 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
584 {
585 	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
586 }
587 
588 #define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
589 
590 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
591 						     const char *buf, size_t count)
592 {
593 	unsigned deadline;
594 
595 	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
596 		return count;
597 
598 	if (kstrtouint(buf, 10, &deadline))
599 		return -EINVAL;
600 
601 	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
602 		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
603 
604 	md->seq_rq_merge_deadline_usecs = deadline;
605 
606 	return count;
607 }
608 
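/*
 * Returns true while the configured merge deadline since the last dispatched
 * request has not yet expired, i.e. while it is still worth stalling dispatch
 * in the hope of merging more sequential I/O.
 */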
609 static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
610 {
611 	ktime_t kt_deadline;
612 
613 	if (!md->seq_rq_merge_deadline_usecs)
614 		return false;
615 
616 	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
617 	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
618 
619 	return !ktime_after(ktime_get(), kt_deadline);
620 }
621 
622 /*
623  * q->request_fn for old request-based dm.
624  * Called with the queue lock held.
625  */
626 static void dm_old_request_fn(struct request_queue *q)
627 {
628 	struct mapped_device *md = q->queuedata;
629 	struct dm_target *ti = md->immutable_target;
630 	struct request *rq;
631 	struct dm_rq_target_io *tio;
632 	sector_t pos = 0;
633 
634 	if (unlikely(!ti)) {
635 		int srcu_idx;
636 		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
637 
638 		if (unlikely(!map)) {
639 			dm_put_live_table(md, srcu_idx);
640 			return;
641 		}
642 		ti = dm_table_find_target(map, pos);
643 		dm_put_live_table(md, srcu_idx);
644 	}
645 
646 	/*
647 	 * For suspend, check blk_queue_stopped() and increment ->pending
648 	 * within a single queue_lock critical section, so that the number
649 	 * of in-flight I/Os is not incremented after the queue has been
650 	 * stopped in dm_suspend().
651 	 */
652 	while (!blk_queue_stopped(q)) {
653 		rq = blk_peek_request(q);
654 		if (!rq)
655 			return;
656 
657 		/* always use block 0 to find the target for flushes for now */
658 		pos = 0;
659 		if (req_op(rq) != REQ_OP_FLUSH)
660 			pos = blk_rq_pos(rq);
661 
662 		if ((dm_old_request_peeked_before_merge_deadline(md) &&
663 		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
664 		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
665 		    (ti->type->busy && ti->type->busy(ti))) {
666 			blk_delay_queue(q, 10);
667 			return;
668 		}
669 
670 		dm_start_request(md, rq);
671 
672 		tio = tio_from_request(rq);
673 		init_tio(tio, rq, md);
674 		/* Establish tio->ti before queuing work (map_tio_request) */
675 		tio->ti = ti;
676 		kthread_queue_work(&md->kworker, &tio->work);
677 		BUG_ON(!irqs_disabled());
678 	}
679 }
680 
681 /*
682  * Fully initialize a .request_fn request-based queue.
683  */
684 int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
685 {
686 	struct dm_target *immutable_tgt;
687 
688 	/* Fully initialize the queue */
689 	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
690 	md->queue->rq_alloc_data = md;
691 	md->queue->request_fn = dm_old_request_fn;
692 	md->queue->init_rq_fn = dm_rq_init_rq;
693 
694 	immutable_tgt = dm_table_get_immutable_target(t);
695 	if (immutable_tgt && immutable_tgt->per_io_data_size) {
696 		/* any target-specific per-io data is immediately after the tio */
697 		md->queue->cmd_size += immutable_tgt->per_io_data_size;
698 		md->init_tio_pdu = true;
699 	}
700 	if (blk_init_allocated_queue(md->queue) < 0)
701 		return -EINVAL;
702 
703 	/* disable dm_old_request_fn's merge heuristic by default */
704 	md->seq_rq_merge_deadline_usecs = 0;
705 
706 	dm_init_normal_md_queue(md);
707 	blk_queue_softirq_done(md->queue, dm_softirq_done);
708 
709 	/* Initialize the request-based DM worker thread */
710 	kthread_init_worker(&md->kworker);
711 	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
712 				       "kdmwork-%s", dm_device_name(md));
713 	if (IS_ERR(md->kworker_task)) {
714 		int error = PTR_ERR(md->kworker_task);
715 		md->kworker_task = NULL;
716 		return error;
717 	}
718 
719 	elv_register_queue(md->queue);
720 
721 	return 0;
722 }
723 
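/*
 * blk-mq .init_request callback: record the mapped_device (passed as
 * set->driver_data) in the per-request tio; __dm_rq_init_rq() also points
 * tio->info.ptr at the target's per-io data area when one is reserved.
 */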
724 static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
725 		unsigned int hctx_idx, unsigned int numa_node)
726 {
727 	return __dm_rq_init_rq(set->driver_data, rq);
728 }
729 
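/*
 * blk-mq .queue_rq callback: resolve the target, back off with
 * BLK_STS_RESOURCE if the target reports it is busy, otherwise initialize the
 * tio and map the request directly.  A DM_MAPIO_REQUEUE result undoes the
 * start-of-request accounting and asks blk-mq to rerun the hardware queue
 * after a short delay.
 */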
730 static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
731 			  const struct blk_mq_queue_data *bd)
732 {
733 	struct request *rq = bd->rq;
734 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
735 	struct mapped_device *md = tio->md;
736 	struct dm_target *ti = md->immutable_target;
737 
738 	if (unlikely(!ti)) {
739 		int srcu_idx;
740 		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
741 
742 		ti = dm_table_find_target(map, 0);
743 		dm_put_live_table(md, srcu_idx);
744 	}
745 
746 	if (ti->type->busy && ti->type->busy(ti))
747 		return BLK_STS_RESOURCE;
748 
749 	dm_start_request(md, rq);
750 
751 	/* Init tio using md established in .init_request */
752 	init_tio(tio, rq, md);
753 
754 	/*
755 	 * Establish tio->ti before calling map_request().
756 	 */
757 	tio->ti = ti;
758 
759 	/* Direct call is fine since .queue_rq allows allocations */
760 	if (map_request(tio) == DM_MAPIO_REQUEUE) {
761 		/* Undo dm_start_request() before requeuing */
762 		rq_end_stats(md, rq);
763 		rq_completed(md, rq_data_dir(rq), false);
764 		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
765 		return BLK_STS_RESOURCE;
766 	}
767 
768 	return BLK_STS_OK;
769 }
770 
771 static const struct blk_mq_ops dm_mq_ops = {
772 	.queue_rq = dm_mq_queue_rq,
773 	.complete = dm_softirq_done,
774 	.init_request = dm_mq_init_request,
775 };
776 
777 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
778 {
779 	struct request_queue *q;
780 	struct dm_target *immutable_tgt;
781 	int err;
782 
783 	if (!dm_table_all_blk_mq_devices(t)) {
784 		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
785 		return -EINVAL;
786 	}
787 
788 	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
789 	if (!md->tag_set)
790 		return -ENOMEM;
791 
792 	md->tag_set->ops = &dm_mq_ops;
793 	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
794 	md->tag_set->numa_node = md->numa_node_id;
795 	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
796 	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
797 	md->tag_set->driver_data = md;
798 
799 	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
800 	immutable_tgt = dm_table_get_immutable_target(t);
801 	if (immutable_tgt && immutable_tgt->per_io_data_size) {
802 		/* any target-specific per-io data is immediately after the tio */
803 		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
804 		md->init_tio_pdu = true;
805 	}
806 
807 	err = blk_mq_alloc_tag_set(md->tag_set);
808 	if (err)
809 		goto out_kfree_tag_set;
810 
811 	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
812 	if (IS_ERR(q)) {
813 		err = PTR_ERR(q);
814 		goto out_tag_set;
815 	}
816 	dm_init_md_queue(md);
817 
818 	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
819 	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
820 	if (err)
821 		goto out_cleanup_queue;
822 
823 	return 0;
824 
825 out_cleanup_queue:
826 	blk_cleanup_queue(q);
827 out_tag_set:
828 	blk_mq_free_tag_set(md->tag_set);
829 out_kfree_tag_set:
830 	kfree(md->tag_set);
831 
832 	return err;
833 }
834 
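/*
 * Release the blk-mq tag set allocated by dm_mq_init_request_queue().
 */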
835 void dm_mq_cleanup_mapped_device(struct mapped_device *md)
836 {
837 	if (md->tag_set) {
838 		blk_mq_free_tag_set(md->tag_set);
839 		kfree(md->tag_set);
840 	}
841 }
842 
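/*
 * Module parameters.  Since dm-rq.c is built into the dm-mod module, these
 * are typically set as dm_mod.<name>=<value> on the kernel command line or
 * adjusted at runtime under /sys/module/dm_mod/parameters/, e.g.:
 *
 *	dm_mod.use_blk_mq=y dm_mod.dm_mq_queue_depth=4096
 *
 * (The dm_mod prefix and sysfs path assume the usual dm-mod build.)
 */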
843 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
844 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
845 
846 module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
847 MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
848 
849 module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
850 MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
851 
852 module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
853 MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
854