/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up anything potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}
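
/*
 * Illustrative note, not part of the original file: the wait queues above
 * are walked in a rotating order. bt_index_inc() relies on BT_WAIT_QUEUES
 * being a power of two, so the increment wraps with a cheap mask instead
 * of a modulo; assuming the usual BT_WAIT_QUEUES == 8, the sequence from
 * index 6 is 7, 0, 1, ... Both blk_mq_tag_wakeup_all() above and
 * bt_wake_ptr() below use this rotation to spread sleepers across several
 * wait queues (and thus cachelines).
 */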

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;
	bool wrap = last_tag != 0;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (wrap) {
				wrap = false;
				end = org_last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit(tag, &bm->word));

	return tag;
}
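
/*
 * Worked examples, illustrative only (not in the original file):
 *
 *  - Fair sharing: with bt->depth == 128 and three active shared users,
 *    hctx_may_queue() allows each hctx up to max((128 + 2) / 3, 4U) == 43
 *    in-flight tags before it starts refusing allocations.
 *
 *  - Word search: with bm->depth == 32 and last_tag == 20, __bt_get_word()
 *    scans bits [20, 32) first; if those are all set it wraps once and
 *    scans [0, 20) before giving up with -1. The test_and_set_bit() loop
 *    simply retries if another CPU wins the race for the bit we found.
 */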

/*
 * Straight forward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}

static int bt_get(struct blk_mq_alloc_data *data,
		  struct blk_mq_bitmap_tags *bt,
		  struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	do {
		bs = bt_wait_ptr(bt, hctx);
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
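
/*
 * Illustrative example (not in the original file): reserved tags occupy
 * the low numbers of the externally visible tag space. With
 * nr_reserved_tags == 1, __blk_mq_get_reserved_tag() can only return 0,
 * while a bitmap tag N from __blk_mq_get_tag() is reported as N + 1;
 * blk_mq_put_tag() below undoes that offset before clearing the bit.
 */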

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		bt_clear_tag(&tags->bitmap_tags, real_tag);
		*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		bt_clear_tag(&tags->breserved_tags, tag);
	}
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}

void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tags->nr_reserved_tags)
		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}
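
/*
 * Worked example, illustrative only (assuming the usual BT_WAIT_BATCH and
 * BT_WAIT_QUEUES values of 8): with depth == 64, depth / BT_WAIT_QUEUES
 * is 8, so wake_cnt stays at BT_WAIT_BATCH and a waiter is woken after
 * every 8 completed tags. For a tiny map of depth 4, 4 / 8 == 0 and
 * wake_cnt is clamped to 1, i.e. every completion wakes a waiter.
 */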

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If there are fewer than 4 tags, just forget about it,
		 * it's not going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}
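
/*
 * Sizing example, illustrative only (not in the original file):
 * blk_mq_init_tags(64, 0, node) asks bt_alloc() for a 64-tag map.
 * Starting from BITS_PER_LONG == 64 tags per word, the shrink loop stops
 * at 16 tags per word (16 * 4 <= 64), so bits_per_word == 4 and the map
 * is spread over ALIGN(64, 16) / 16 == 4 blk_align_bitmap words, each in
 * its own cacheline.
 */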

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n",
			atomic_read(&tags->active_queues));

	return page - orig_page;
}
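
/*
 * Illustrative output (not from the original file): for the 64-tag map
 * from the sizing example above, with no requests in flight and no shared
 * users, blk_mq_tag_sysfs_show() would emit something like:
 *
 *	nr_tags=64, reserved_tags=0, bits_per_word=4
 *	nr_free=64, nr_reserved=0
 *	active_queues=0
 */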