/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>
#include <linux/rw_hint.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

/* request flags */
typedef __u32 __bitwise req_flags_t;

/* Keep rqf_name[] in sync with the definitions below */
enum {
	/* drive already may have started this one */
	__RQF_STARTED,
	/* request for flush sequence */
	__RQF_FLUSH_SEQ,
	/* merge of different types, fail separately */
	__RQF_MIXED_MERGE,
	/* don't call prep for this one */
	__RQF_DONTPREP,
	/* use hctx->sched_tags */
	__RQF_SCHED_TAGS,
	/* use an I/O scheduler for this request */
	__RQF_USE_SCHED,
	/* vaguely specified driver internal error.  Ignored by block layer */
	__RQF_FAILED,
	/* don't warn about errors */
	__RQF_QUIET,
	/* account into disk and partition IO statistics */
	__RQF_IO_STAT,
	/* runtime pm request */
	__RQF_PM,
	/* on IO scheduler merge hash */
	__RQF_HASHED,
	/* track IO completion time */
	__RQF_STATS,
	/* Look at ->special_vec for the actual data payload instead of the
	   bio chain. */
	__RQF_SPECIAL_PAYLOAD,
	/* request completion needs to be signaled to zone write plugging. */
	__RQF_ZONE_WRITE_PLUGGING,
	/* ->timeout has been called, don't expire again */
	__RQF_TIMED_OUT,
	__RQF_RESV,
	__RQF_BITS
};

#define RQF_STARTED		((__force req_flags_t)(1 << __RQF_STARTED))
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << __RQF_FLUSH_SEQ))
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << __RQF_MIXED_MERGE))
#define RQF_DONTPREP		((__force req_flags_t)(1 << __RQF_DONTPREP))
#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << __RQF_SCHED_TAGS))
#define RQF_USE_SCHED		((__force req_flags_t)(1 << __RQF_USE_SCHED))
#define RQF_FAILED		((__force req_flags_t)(1 << __RQF_FAILED))
#define RQF_QUIET		((__force req_flags_t)(1 << __RQF_QUIET))
#define RQF_IO_STAT		((__force req_flags_t)(1 << __RQF_IO_STAT))
#define RQF_PM			((__force req_flags_t)(1 << __RQF_PM))
#define RQF_HASHED		((__force req_flags_t)(1 << __RQF_HASHED))
#define RQF_STATS		((__force req_flags_t)(1 << __RQF_STATS))
#define RQF_SPECIAL_PAYLOAD	\
			((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD))
#define RQF_ZONE_WRITE_PLUGGING	\
			((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING))
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << __RQF_TIMED_OUT))
#define RQF_RESV		((__force req_flags_t)(1 << __RQF_RESV))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	enum rw_hint write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
	 * inserted into an IO scheduler.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	struct {
		struct io_cq		*icq;
		void			*priv[2];
	} elv;

	struct {
		unsigned int		seq;
		rq_end_io_fn		*saved_end_io;
	} flush;

	u64 fifo_time;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
	(rq)->rq_next = NULL;				\
	**(lastpptr) = rq;				\
	*(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))	{		\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

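/*
 * Illustrative sketch (not part of this header): a driver ->queue_rqs()
 * implementation might drain a plugged list with rq_list_pop() and hand back
 * whatever it could not issue, which the block layer then queues
 * individually. mydrv_queue_one() is a hypothetical driver helper.
 *
 *	static void mydrv_queue_rqs(struct request **rqlist)
 *	{
 *		struct request *rq, *requeue_list = NULL;
 *
 *		while ((rq = rq_list_pop(rqlist))) {
 *			if (!mydrv_queue_one(rq))
 *				rq_list_add(&requeue_list, rq);
 *		}
 *		*rqlist = requeue_list;	// rest is requeued by the block layer
 *	}
 */
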
/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}

/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

/* Keep alloc_policy_name[] in sync with the definitions below */
enum {
	BLK_TAG_ALLOC_FIFO,	/* allocate starting from 0 */
	BLK_TAG_ALLOC_RR,	/* allocate starting from last allocated tag */
	BLK_TAG_ALLOC_MAX
};

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests in this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head	dispatch;
		 /**
		  * @state: BLK_MQ_S_* flags. Defines the state of the hw
		  * queue (active, scheduled to restart, stopped).
		  */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many queue runs are left in the
	 * batch before changing to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag in this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: List to store requests if a CPU is going to die */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store requests if a CPU dies. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir:	debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

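/*
 * Illustrative sketch (assumed driver code, not part of this header): a
 * ->map_queues() callback for a driver with a single queue type that keeps
 * the default CPU spreading. The "mydrv" name is hypothetical.
 *
 *	static void mydrv_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];
 *
 *		qmap->nr_queues = set->nr_hw_queues;
 *		qmap->queue_offset = 0;
 *		blk_mq_map_queues(qmap);	// default CPU-to-hctx spreading
 *	}
 */
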
/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:
 *		   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Used as a lock when the type of the request queue is
 *		   blocking (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops	*ops;
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
	struct srcu_struct	*srcu;
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. Driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * ->queue_rq() has run, it is the driver's responsibility to release
	 * the reserved budget. The failure case of ->get_budget() also has to
	 * be handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tags greater than or equal to queue_depth are used for setting up
	 * flush requests.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing one request which isn't completed
	 * yet, and usually for freeing the driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};

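/*
 * Illustrative sketch (assumed driver code, not part of this header): the
 * minimal useful ops table is a ->queue_rq() that starts the request, hands
 * it to the hardware, and lets the completion path finish it later.
 * mydrv_submit(), mydrv_complete_rq() and mydrv_ops are hypothetical names.
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (!mydrv_submit(hctx->driver_data, rq))
 *			return BLK_STS_RESOURCE;	// retried later
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops mydrv_ops = {
 *		.queue_rq	= mydrv_queue_rq,
 *		.complete	= mydrv_complete_rq,
 *	};
 */
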
/* Keep hctx_flag_name[] in sync with the definitions below */
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires an underlying blk-mq device for
	 * completing IO:
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 4,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 5,

	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 7,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_MAX_DEPTH	(10240)
#define BLK_MQ_NO_HCTX_IDX	(-1U)

enum {
	/* Keep hctx_state_name[] in sync with the definitions below */
	BLK_MQ_S_STOPPED,
	BLK_MQ_S_TAG_ACTIVE,
	BLK_MQ_S_SCHED_RESTART,
	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE,
	BLK_MQ_S_MAX
};

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, lim, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, lim, queuedata, &__key);		\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

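/*
 * Illustrative sketch (assumed driver probe code, not part of this header):
 * typical bring-up fills in a blk_mq_tag_set, registers it and allocates a
 * gendisk on top of it. mydrv_tag_set, mydrv_ops, struct mydrv_pdu and mydrv
 * are hypothetical; error unwinding is omitted for brevity.
 *
 *	mydrv_tag_set.ops = &mydrv_ops;
 *	mydrv_tag_set.nr_hw_queues = 1;
 *	mydrv_tag_set.queue_depth = BLKDEV_DEFAULT_RQ;
 *	mydrv_tag_set.cmd_size = sizeof(struct mydrv_pdu);
 *	mydrv_tag_set.numa_node = NUMA_NO_NODE;
 *	mydrv_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&mydrv_tag_set);
 *	if (ret)
 *		return ret;
 *	disk = blk_mq_alloc_disk(&mydrv_tag_set, NULL, mydrv);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	// ...set capacity and disk->fops, then add_disk(disk)...
 */
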
void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
		unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);

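/*
 * Illustrative sketch (not part of this header): callers such as ioctl
 * handlers allocate a passthrough request, fill the driver PDU and execute it
 * synchronously. The PDU layout and mydrv_fill_cmd() are hypothetical.
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	mydrv_fill_cmd(blk_mq_rq_to_pdu(rq));
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */
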
/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request reference in rqs[] before freeing one
	 * request pool
	 */
	spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

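/*
 * Illustrative sketch (not part of this header): the unique tag packs the hw
 * queue index in the upper 16 bits and the per-queue tag in the lower 16
 * bits, so a driver can recover both from a single stashed value.
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */
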
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of in
 * an interrupt.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		   void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	/*
	 * Passthrough IO doesn't use iostat accounting, cgroup stats
	 * or IO scheduler functionality.
	 */
	if (blk_rq_is_passthrough(rq))
		return false;
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	/*
	 * blk_mq_end_request_batch() can't end requests allocated from
	 * sched tags.
	 */
	if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
			(req->end_io && !blk_rq_is_passthrough(req)))
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}

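/*
 * Illustrative sketch (assumed driver code, not part of this header): a
 * completion path, e.g. a ->poll() handler, tries to batch each finished
 * request and falls back to the regular completion path when batching is not
 * possible. mydrv_complete_batch() is hypothetical and would eventually call
 * blk_mq_end_request_batch().
 *
 *	if (!blk_mq_add_to_batch(rq, iob, error, mydrv_complete_batch))
 *		blk_mq_complete_request(rq);
 */
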
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

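/*
 * Illustrative sketch (not part of this header): with cmd_size set in the tag
 * set, every request carries a driver PDU right behind it, and the two
 * helpers above convert both ways. struct mydrv_cmd is hypothetical.
 *
 *	// set->cmd_size = sizeof(struct mydrv_cmd);
 *	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);	// rq2 == rq
 */
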
#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller needs to check that
 * there are any bvecs before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			       unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
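
/*
 * Illustrative sketch (assumed driver code, not part of this header): in
 * ->queue_rq() a driver typically maps the request onto a preallocated
 * scatterlist before DMA-mapping it. mydrv->sg_table is hypothetical.
 *
 *	sg_init_table(mydrv->sg_table, blk_rq_nr_phys_segments(rq));
 *	nr_sg = blk_rq_map_sg(rq->q, rq, mydrv->sg_table);
 *	// then dma_map_sg(dev, mydrv->sg_table, nr_sg, rq_dma_dir(rq))
 */
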
void blk_dump_rq_flags(struct request *, char *);

#endif /* BLK_MQ_H */