--- blk-mq.h (645db34e50501aac141713fb47a315e5202ff890)
+++ blk-mq.h (e155b0c238b20f0a866f4334d292656665836c8a)

 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef INT_BLK_MQ_H
 #define INT_BLK_MQ_H

 #include "blk-stat.h"
 #include "blk-mq-tag.h"

 struct blk_mq_tag_set;

--- 40 unchanged lines hidden ---

 					struct blk_mq_ctx *start);
 void blk_mq_put_rq_ref(struct request *rq);

 /*
  * Internal helpers for allocating/freeing the request map
  */
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx);
-void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
+void blk_mq_free_rq_map(struct blk_mq_tags *tags);
 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 					     unsigned int hctx_idx, unsigned int depth);
 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 			     struct blk_mq_tags *tags,
 			     unsigned int hctx_idx);
 /*
  * Internal helpers for request insertion into sw queues
  */
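Aside: the request-map prototypes in the hunk above pair up naturally. Below is a minimal sketch of how a caller inside blk-mq core might pair them under the new signatures; the function name, the error handling, and the use of set->queue_depth as the depth argument are illustrative assumptions, not taken from this diff.

/* Sketch only: pairing blk_mq_alloc_map_and_rqs() with blk_mq_free_map_and_rqs(). */
static int example_map_setup(struct blk_mq_tag_set *set, unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;

	/* Allocate the tag map and the requests backing it (assumed depth). */
	tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, set->queue_depth);
	if (!tags)
		return -ENOMEM;

	/* ... hand tags to the hardware context for hctx_idx ... */

	/* Tear down both the requests and the map itself. */
	blk_mq_free_map_and_rqs(set, tags, hctx_idx);
	return 0;
}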

--- 259 unchanged lines hidden ---

 	/*
 	 * Don't try dividing an ant
 	 */
 	if (bt->sb.depth == 1)
 		return true;

 	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
-		struct blk_mq_tag_set *set = q->tag_set;

 		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
 			return true;
-		users = atomic_read(&set->active_queues_shared_sbitmap);
 	} else {
 		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 			return true;
-		users = atomic_read(&hctx->tags->active_queues);
 	}

+	users = atomic_read(&hctx->tags->active_queues);
+
 	if (!users)
 		return true;

 	/*
 	 * Allow at least some tags
 	 */
 	depth = max((bt->sb.depth + users - 1) / users, 4U);
 	return __blk_mq_active_requests(hctx) < depth;
 }


 #endif
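The hctx_may_queue() logic in the last hunk limits each active queue to a fair share of the tag depth: ceiling division of the sbitmap depth by the number of active users, with a floor of four tags. A standalone userspace illustration of just that arithmetic (not kernel code; the sample values below are made up):

#include <stdio.h>

/* Mirrors: depth = max((bt->sb.depth + users - 1) / users, 4U); */
static unsigned int fair_share_depth(unsigned int sb_depth, unsigned int users)
{
	unsigned int fair = (sb_depth + users - 1) / users;	/* ceiling division */

	return fair > 4U ? fair : 4U;				/* never fewer than 4 tags */
}

int main(void)
{
	/* 256 tags shared by 3 active queues -> 86 tags per queue */
	printf("%u\n", fair_share_depth(256, 3));
	/* a small 8-tag set shared by 16 queues still gets the floor of 4 */
	printf("%u\n", fair_share_depth(8, 16));
	return 0;
}

A queue may take another tag only while its in-flight count, as reported by __blk_mq_active_requests(), stays below this per-queue depth.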