// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when the tag map is shared between hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}
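
/*
 * Worked example (illustrative numbers, not from this file): with
 * bitmap_tags of depth 256 shared by 4 active queues, each queue's fair
 * share is roughly 256 / 4 = 64 tags, so the wakeup batch has to be
 * recalculated against that smaller effective depth; a batch sized for
 * the full map could otherwise leave sleepers unwoken.
 */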

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag; that way,
 * even if the first allocation attempt fails, the other shared-tag users
 * can still reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;
	unsigned long flags;
	struct blk_mq_tags *tags = hctx->tags;

	/*
	 * Calling test_bit() prior to test_and_set_bit() is intentional;
	 * it avoids dirtying the cacheline if the queue is already active.
	 */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return;
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irqsave(&tags->lock, flags);
	users = tags->active_queues + 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irqrestore(&tags->lock, flags);
}
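
/*
 * Sketch of how this accounting is consumed (hedged: the authoritative
 * check is hctx_may_queue() in blk-mq.h, whose exact clamping may
 * differ). With a shared map, each active queue is limited to roughly
 * its fair share of the depth:
 *
 *	users = READ_ONCE(tags->active_queues);
 *	if (users)
 *		depth = max((bt->sb.depth + users - 1) / users, 4U);
 */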

/*
 * Wake up everyone potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues - 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
			!hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}
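
/*
 * Illustrative decode of the batch result (an assumption about the
 * semantics of __sbitmap_queue_get_batch(), hedged accordingly): the
 * return value is a bitmask of acquired bits based at *offset. E.g. a
 * return of 0xf with *offset == 128 would mean tags 128..131 were
 * allocated in one shot.
 */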

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue; kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wakeup on the previous queue to compensate for the missed
		 * wakeup, so other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev, 1);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive.  The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
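
/*
 * Hypothetical caller sketch (for orientation only; real callers such as
 * the request allocation path set up more of blk_mq_alloc_data, including
 * ->ctx and ->hctx, and "opf" below is an assumed variable):
 *
 *	struct blk_mq_alloc_data data = {
 *		.q = q,
 *		.flags = BLK_MQ_REQ_NOWAIT,
 *		.cmd_flags = opf,
 *	};
 *	unsigned int tag = blk_mq_get_tag(&data);
 *
 * With BLK_MQ_REQ_NOWAIT set, a return of BLK_MQ_NO_TAG means no tag was
 * available without sleeping.
 */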

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
					tag_array, nr_tags);
}
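
/*
 * Tag numbering, worked example (illustrative values): with
 * nr_reserved_tags == 2, external tag 5 is bit 3 of bitmap_tags, while
 * external tags 0 and 1 live in breserved_tags. The +/- tag_offset
 * arithmetic in blk_mq_get_tag() and blk_mq_put_tag() converts between
 * the two numbering schemes.
 */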

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!iter_data->reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data) where rq is a
 *		pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}
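
/*
 * Example iteration callback (an illustrative sketch, not part of this
 * file), matching the busy_tag_iter_fn signature used above:
 *
 *	static bool count_inflight(struct request *rq, void *data)
 *	{
 *		unsigned int *inflight = data;
 *
 *		(*inflight)++;
 *		return true;
 *	}
 *
 * Returning false from the callback stops the iteration early.
 */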

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
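
/*
 * Illustrative driver-side use (hypothetical; pairs with the
 * count_inflight() sketch shown earlier):
 *
 *	unsigned int inflight = 0;
 *
 *	blk_mq_tagset_busy_iter(&set, count_inflight, &inflight);
 */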

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
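
/*
 * Hedged sketch of a typical teardown order in a driver (cancel_rq_fn is
 * a hypothetical callback): cancel what is still in flight, then wait for
 * any in-progress completions to drain:
 *
 *	blk_mq_tagset_busy_iter(&set, cancel_rq_fn, ctrl);
 *	blk_mq_tagset_wait_completed_request(&set);
 */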

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv) where rq
 *		is a pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check.
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
		unsigned int reserved_tags, unsigned int flags, int node)
{
	unsigned int depth = total_tags - reserved_tags;
	bool round_robin = flags & BLK_MQ_F_TAG_RR;
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);
	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto out_free_tags;
	if (bt_alloc(&tags->breserved_tags, reserved_tags, round_robin, node))
		goto out_free_bitmap_tags;

	return tags;

out_free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
out_free_tags:
	kfree(tags);
	return NULL;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}
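
/*
 * Illustrative allocation (the values are made up): a 256-tag map with
 * one reserved tag, allocated without NUMA affinity:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(256, 1, 0, NUMA_NO_NODE);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_tags(tags);
 */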

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit; set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}
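
/*
 * Worked example (illustrative numbers): shrinking a 256-deep map with
 * one reserved tag to tdepth == 128 takes the "else" branch above and
 * resizes bitmap_tags to 128 - 1 = 127 usable bits; the reserved map is
 * left untouched.
 */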

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
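
/*
 * Decoding example: the blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() helpers in include/linux/blk-mq.h split the
 * value back apart:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */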
669