/* xref: /linux/block/blk-mq.h (revision cb96a42cc1f50ba1c7b1e9b2343bec80b926107f) */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

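/*
 * Per-CPU software queue. Requests are staged on rq_list under lock,
 * then drained into the hardware context this CPU is mapped to;
 * index_hw is this ctx's index within that hardware context.
 */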
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
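
/*
 * Illustrative registration sketch (the callback name and the notifier
 * being embedded in a hw ctx are assumptions about the caller, not
 * something this header guarantees):
 *
 *	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 *				 blk_mq_hctx_notify, hctx);
 *	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
 */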

/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int index);

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);

/*
 * Basic implementation of a sparse bitmap, allowing the user to spread
 * the bits over multiple cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;
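
/*
 * A minimal addressing sketch, assuming the map owner stores a fixed
 * number of valid bits per word (bits_per_word below is such an
 * assumed per-map parameter, not a field of this struct):
 *
 *	bm = &map[bit / bits_per_word];
 *	set_bit(bit % bits_per_word, &bm->word);
 *
 * Keeping each word on its own cacheline avoids line bouncing when
 * many CPUs set bits concurrently.
 */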

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctxs are persistent. This
 * does mean that we can't rely on the ctx always matching the currently
 * running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
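
/*
 * Illustrative pairing (a sketch, not a caller in this file):
 * blk_mq_get_ctx() pins the caller via get_cpu(), so each lookup
 * must be balanced with blk_mq_put_ctx():
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... queue or complete work on ctx ...
 *	blk_mq_put_ctx(ctx);
 */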

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx,
		struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}
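
/*
 * Illustrative sketch of packing the allocation parameters before a
 * request is allocated (the __blk_mq_alloc_request() call stands in
 * for whatever allocator the caller uses and is an assumption here):
 *
 *	struct blk_mq_alloc_data alloc_data;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&alloc_data, rw);
 */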

#endif