#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
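
/*
 * The submission-side fields (lock/rq_list, dispatch counters) and the
 * completion counters sit on separate cachelines so that completing
 * requests on another CPU doesn't bounce the submission path's line.
 * A minimal sketch of how callers might bump these stats; indexing the
 * [2] arrays as [is_sync] is an assumption here, not something this
 * header mandates:
 *
 *	ctx->rq_dispatched[rw_is_sync(rq->cmd_flags)]++;
 *	...
 *	ctx->rq_completed[rq_is_sync(rq)]++;
 */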

void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
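
/*
 * Typical usage, sketched from the signatures above (the callback name,
 * the embedding of the notifier in the hctx, and the CPU_DEAD check are
 * illustrative assumptions, not required by this API): point a notifier
 * at a callback, then register it. The callback receives the opaque data
 * pointer, the hotplug action, and the CPU in question.
 *
 *	static int my_cpu_notify(void *data, unsigned long action,
 *				 unsigned int cpu)
 *	{
 *		struct blk_mq_hw_ctx *hctx = data;
 *
 *		if (action == CPU_DEAD)
 *			;	// migrate work off this cpu
 *		return NOTIFY_OK;
 *	}
 *
 *	blk_mq_init_cpu_notifier(&hctx->cpu_notifier, my_cpu_notify, hctx);
 *	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
 */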

/*
 * CPU -> queue mappings
 */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
				   const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
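
/*
 * The map is a per-cpu array: map[cpu] holds the hardware queue index that
 * cpu submits to. A hedged sketch of how a caller might consume it (error
 * handling omitted; set->nr_hw_queues comes from struct blk_mq_tag_set):
 *
 *	unsigned int *map = blk_mq_make_queue_map(set);
 *
 *	blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask);
 *	hctx_idx = map[raw_smp_processor_id()];
 *	node = blk_mq_hw_queue_to_node(map, hctx_idx);
 */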

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

/*
 * Basic implementation of a sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;
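
/*
 * Each blk_align_bitmap sits on its own cacheline: 'word' holds the bits
 * and 'depth' records how many of them are actually in use, so a logically
 * contiguous bitmap can be spread across several cachelines to cut
 * contention. A sketch of bit addressing under that assumption, where
 * bits_per_word is whatever granularity the user picked (at most
 * BITS_PER_LONG):
 *
 *	struct blk_align_bitmap *bm = &map[bit / bits_per_word];
 *
 *	if (bm->word & (1UL << (bit % bits_per_word)))
 *		...	// bit is set
 */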

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
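
/*
 * blk_mq_get_ctx() disables preemption via get_cpu(), so the returned ctx
 * stays valid (though, per the comment above, it need not keep matching the
 * running CPU); every call must be paired with blk_mq_put_ctx(). A minimal
 * usage sketch, using the lock and rq_list from struct blk_mq_ctx:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	spin_lock(&ctx->lock);
 *	// ... add to or inspect ctx->rq_list ...
 *	spin_unlock(&ctx->lock);
 *	blk_mq_put_ctx(ctx);
 */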

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx,
		struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}
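
/*
 * A hedged sketch of how an allocation path might fill this in; the
 * __blk_mq_alloc_request() consumer named here is an assumption, not
 * part of this header:
 *
 *	struct blk_mq_alloc_data alloc_data;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&alloc_data, rw);
 */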

/*
 * A hw queue is mapped once it has software queues and a tag set attached.
 */
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif