xref: /linux/block/blk-mq-tag.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu_ida.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * Per tagged queue (tag address space) map
 */
struct blk_mq_tags {
	unsigned int nr_tags;		/* total number of tags, reserved + normal */
	unsigned int nr_reserved_tags;	/* tags set aside for reserved allocations */
	unsigned int nr_batch_move;	/* percpu_ida batch size for cache refills */
	unsigned int nr_max_cache;	/* max tags cached per cpu */

	struct percpu_ida free_tags;	/* allocator for the normal tag space */
	struct percpu_ida reserved_tags;	/* allocator for the reserved tag space */
};

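/*
 * Wait until at least one tag is available: allocate a tag with
 * __GFP_WAIT, which blocks until the pool has a free entry, then hand
 * it straight back.
 */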
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
{
	int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
	blk_mq_put_tag(tags, tag);
}

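/*
 * True if there are free normal tags, or if there is no tag map at all
 * (a NULL map is treated as always having room).
 */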
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	return !tags ||
		percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
}

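/*
 * Normal tags occupy the range [nr_reserved_tags, nr_tags).  The
 * percpu_ida pool hands out indexes starting at 0, so offset the result
 * by nr_reserved_tags before returning it.
 */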
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{
	int tag;

	tag = percpu_ida_alloc(&tags->free_tags, gfp);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;
	return tag + tags->nr_reserved_tags;
}

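/*
 * Reserved tags occupy [0, nr_reserved_tags) and come from their own
 * pool, so no offset is needed.  Asking for a reserved tag on a queue
 * that was set up without any is a caller bug.
 */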
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
					      gfp_t gfp)
{
	int tag;

	if (unlikely(!tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;
	return tag;
}

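/*
 * Allocate a tag from either the normal or the reserved pool.  A rough
 * caller-side sketch (hypothetical, not taken from this file):
 *
 *	tag = blk_mq_get_tag(tags, GFP_KERNEL, false);
 *	if (tag == BLK_MQ_TAG_FAIL)
 *		return -EBUSY;
 *	... issue the request using 'tag' ...
 *	blk_mq_put_tag(tags, tag);
 */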
unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
{
	if (!reserved)
		return __blk_mq_get_tag(tags, gfp);

	return __blk_mq_get_reserved_tag(tags, gfp);
}

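/*
 * Return a normal tag to the free pool, undoing the nr_reserved_tags
 * offset applied at allocation time.
 */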
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	percpu_ida_free(&tags->reserved_tags, tag);
}

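/*
 * Release a tag to whichever pool it came from: tags below
 * nr_reserved_tags belong to the reserved pool, everything else to the
 * normal pool.
 */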
void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag >= tags->nr_reserved_tags)
		__blk_mq_put_tag(tags, tag);
	else
		__blk_mq_put_reserved_tag(tags, tag);
}

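/* percpu_ida_for_each_free() callback: record one free tag in the bitmap */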
static int __blk_mq_tag_iter(unsigned id, void *data)
{
	unsigned long *tag_map = data;
	__set_bit(id, tag_map);
	return 0;
}

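/*
 * Build a bitmap of all currently free tags (normal and reserved) and
 * pass it to @fn.  Clear bits therefore correspond to busy tags, which
 * is what callers typically care about.  The bitmap is allocated with
 * GFP_ATOMIC so this can run from non-sleeping context; if the
 * allocation fails the walk is silently skipped.
 */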
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
	if (tags->nr_reserved_tags)
		percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
			tag_map);

	fn(data, tag_map);
	kfree(tag_map);
}

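/*
 * Allocate and initialise a tag map.  The normal pool gets
 * total_tags - reserved_tags entries; the per-cpu cache size is derived
 * from the number of possible CPUs and clamped to
 * [BLK_MQ_TAG_CACHE_MIN, BLK_MQ_TAG_CACHE_MAX].
 *
 * A minimal usage sketch (hypothetical, not taken from this file):
 *
 *	tags = blk_mq_init_tags(64, 1, NUMA_NO_NODE);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_tags(tags);
 */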
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	unsigned int nr_tags, nr_cache;
	struct blk_mq_tags *tags;
	int ret;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	nr_tags = total_tags - reserved_tags;
	nr_cache = nr_tags / num_possible_cpus();

	if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
		nr_cache = BLK_MQ_TAG_CACHE_MIN;
	else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
		nr_cache = BLK_MQ_TAG_CACHE_MAX;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	tags->nr_max_cache = nr_cache;
	tags->nr_batch_move = max(1u, nr_cache / 2);

	ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
				tags->nr_reserved_tags,
				tags->nr_max_cache,
				tags->nr_batch_move);
	if (ret)
		goto err_free_tags;

	if (reserved_tags) {
		/*
		 * With max_cache and batch both set to 1, the allocator
		 * falls back to no per-cpu caching.  That's fine; reserved
		 * tag allocation is allowed to be slow.
		 */
		ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
				1, 1);
		if (ret)
			goto err_reserved_tags;
	}

	return tags;

err_reserved_tags:
	percpu_ida_destroy(&tags->free_tags);
err_free_tags:
	kfree(tags);
	return NULL;
}

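/*
 * Tear down both percpu_ida pools and free the tag map itself.
 */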
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	percpu_ida_destroy(&tags->free_tags);
	percpu_ida_destroy(&tags->reserved_tags);
	kfree(tags);
}

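/*
 * Format the tag map configuration and current free counts (pool-wide
 * and per possible CPU) into a sysfs page.  Returns the number of bytes
 * written, or 0 if there is no tag map.
 */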
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	int cpu;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
			" max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
			tags->nr_batch_move, tags->nr_max_cache);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
			percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
			percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));

	for_each_possible_cpu(cpu) {
		page += sprintf(page, "  cpu%02u: nr_free=%u\n", cpu,
				percpu_ida_free_tags(&tags->free_tags, cpu));
	}

	return page - orig_page;
}