blk-mq.h — diff view between commit 34069d12e239 (old) and commit b8643d682669 (new)
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef INT_BLK_MQ_H
3#define INT_BLK_MQ_H
4
5#include <linux/blk-mq.h>
6#include "blk-stat.h"
7
8struct blk_mq_tag_set;

--- 257 unchanged lines hidden (view full) ---

266
267static inline int blk_mq_get_rq_budget_token(struct request *rq)
268{
269 if (rq->q->mq_ops->get_rq_budget_token)
270 return rq->q->mq_ops->get_rq_budget_token(rq);
271 return -1;
272}
273
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef INT_BLK_MQ_H
3#define INT_BLK_MQ_H
4
5#include <linux/blk-mq.h>
6#include "blk-stat.h"
7
8struct blk_mq_tag_set;

--- 257 unchanged lines hidden (view full) ---

266
267static inline int blk_mq_get_rq_budget_token(struct request *rq)
268{
269 if (rq->q->mq_ops->get_rq_budget_token)
270 return rq->q->mq_ops->get_rq_budget_token(rq);
271 return -1;
272}
273
274static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
274static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
275 int val)
275{
276 if (blk_mq_is_shared_tags(hctx->flags))
276{
277 if (blk_mq_is_shared_tags(hctx->flags))
277 atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
278 atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
278 else
279 else
279 atomic_inc(&hctx->nr_active);
280 atomic_add(val, &hctx->nr_active);
280}
281
281}
282
283static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
284{
285 __blk_mq_add_active_requests(hctx, 1);
286}
287
282static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
283 int val)
284{
285 if (blk_mq_is_shared_tags(hctx->flags))
286 atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
287 else
288 atomic_sub(val, &hctx->nr_active);
289}
290
291static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
292{
293 __blk_mq_sub_active_requests(hctx, 1);
294}
295
288static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
289 int val)
290{
291 if (blk_mq_is_shared_tags(hctx->flags))
292 atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
293 else
294 atomic_sub(val, &hctx->nr_active);
295}
296
297static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
298{
299 __blk_mq_sub_active_requests(hctx, 1);
300}
301
302static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
303 int val)
304{
305 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
306 __blk_mq_add_active_requests(hctx, val);
307}
308
309static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
310{
311 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
312 __blk_mq_inc_active_requests(hctx);
313}
314
315static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
316 int val)
317{
318 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
319 __blk_mq_sub_active_requests(hctx, val);
320}
321
322static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
323{
324 if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
325 __blk_mq_dec_active_requests(hctx);
326}
327
296static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
297{
298 if (blk_mq_is_shared_tags(hctx->flags))
299 return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
300 return atomic_read(&hctx->nr_active);
301}
302static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
303 struct request *rq)
304{
328static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
329{
330 if (blk_mq_is_shared_tags(hctx->flags))
331 return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
332 return atomic_read(&hctx->nr_active);
333}
334static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
335 struct request *rq)
336{
337 blk_mq_dec_active_requests(hctx);
305 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
306 rq->tag = BLK_MQ_NO_TAG;
338 blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
339 rq->tag = BLK_MQ_NO_TAG;
307
308 if (rq->rq_flags & RQF_MQ_INFLIGHT) {
309 rq->rq_flags &= ~RQF_MQ_INFLIGHT;
310 __blk_mq_dec_active_requests(hctx);
311 }
312}
313
314static inline void blk_mq_put_driver_tag(struct request *rq)
315{
316 if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
317 return;
318
319 __blk_mq_put_driver_tag(rq->mq_hctx, rq);
320}
321
340}
341
342static inline void blk_mq_put_driver_tag(struct request *rq)
343{
344 if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
345 return;
346
347 __blk_mq_put_driver_tag(rq->mq_hctx, rq);
348}
349
322bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);
350bool __blk_mq_alloc_driver_tag(struct request *rq);
323
324static inline bool blk_mq_get_driver_tag(struct request *rq)
325{
326 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
327
351
352static inline bool blk_mq_get_driver_tag(struct request *rq)
353{
354 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
355
328 if (rq->tag != BLK_MQ_NO_TAG &&
329 !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
330 hctx->tags->rqs[rq->tag] = rq;
331 return true;
332 }
356 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
357 return false;
333
358
334 return __blk_mq_get_driver_tag(hctx, rq);
359 hctx->tags->rqs[rq->tag] = rq;
360 return true;
335}
336
337static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
338{
339 int cpu;
340
341 for_each_possible_cpu(cpu)
342 qmap->mq_map[cpu] = 0;

--- 105 unchanged lines hidden ---
361}
362
363static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
364{
365 int cpu;
366
367 for_each_possible_cpu(cpu)
368 qmap->mq_map[cpu] = 0;

--- 105 unchanged lines hidden ---