// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,
	KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
	[KYBER_READ] = "READ",
	[KYBER_WRITE] = "WRITE",
	[KYBER_DISCARD] = "DISCARD",
	[KYBER_OTHER] = "OTHER",
};

enum {
	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_WRITE] = 128,
	[KYBER_DISCARD] = 64,
	[KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_WRITE] = 8,
	[KYBER_DISCARD] = 1,
	[KYBER_OTHER] = 1,
};

/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,
	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * thus, "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};
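
/*
 * Illustrative example (added commentary, not in the original source): with
 * the default 2 ms read target and KYBER_LATENCY_SHIFT = 2, each bucket is
 * 0.5 ms wide, so a 1.2 ms completion is counted in bucket 2 (a "good"
 * bucket) and a 4.5 ms completion lands in bucket 7, the catch-all "bad"
 * bucket (see add_latency_sample() below).
 */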

/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * There is the same mapping between ctx & hctx as between kcq & khd;
 * we use request->mq_ctx->index_hw to index the kcq in khd.
 */
struct kyber_ctx_queue {
	/*
	 * Used to ensure operations on rq_list and kcq_map are atomic.
	 * Also protects the rqs on rq_list during merges.
	 */
	spinlock_t lock;
	struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
	struct request_queue *q;

	/*
	 * Each scheduling domain has a limited number of in-flight requests
	 * device-wide, limited by these tokens.
	 */
	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

	/*
	 * Async request percentage, converted to per-word depth for
	 * sbitmap_get_shallow().
	 */
	unsigned int async_depth;

	struct kyber_cpu_latency __percpu *cpu_latency;

	/* Timer for stats aggregation and adjusting domain tokens. */
	struct timer_list timer;

	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

	unsigned long latency_timeout[KYBER_OTHER];

	int domain_p99[KYBER_OTHER];

	/* Target latencies in nanoseconds. */
	u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
	spinlock_t lock;
	struct list_head rqs[KYBER_NUM_DOMAINS];
	unsigned int cur_domain;
	unsigned int batching;
	struct kyber_ctx_queue *kcqs;
	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
	atomic_t wait_index[KYBER_NUM_DOMAINS];
};
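
/*
 * Summary note (added commentary, not in the original source):
 * kyber_queue_data is per request_queue and holds the device-wide token pools
 * and aggregated latency statistics, while kyber_hctx_data is per hardware
 * queue and holds the staged requests and the sbitmap wait hooks used when a
 * domain runs out of tokens.
 */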

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

static unsigned int kyber_sched_domain(unsigned int op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_READ:
		return KYBER_READ;
	case REQ_OP_WRITE:
		return KYBER_WRITE;
	case REQ_OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}

static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
	unsigned int bucket;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	unsigned int bucket, samples = 0, percentile_samples;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		samples += buckets[bucket];

	if (!samples)
		return -1;

	/*
	 * We do the calculation once we have 500 samples or one second passes
	 * since the first sample was recorded, whichever comes first.
	 */
	if (!kqd->latency_timeout[sched_domain])
		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
	if (samples < 500 &&
	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
		return -1;
	}
	kqd->latency_timeout[sched_domain] = 0;

	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
			    kyber_latency_type_names[type], percentile,
			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

	return bucket;
}

static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{
	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
				   depth);
	}
}
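
/*
 * Worked example (added commentary, not in the original source): when
 * congestion is detected, a domain whose total-latency p99 fell in bucket 2
 * (at most 3/4 of its target) is throttled from 256 tokens to
 * 256 * (2 + 1) >> KYBER_LATENCY_SHIFT = 192 tokens in kyber_timer_fn()
 * below; a domain whose p99 fell in bucket 7 (more than 1 3/4 of its target)
 * is eased up to 256 * 8 >> 2 = 512, which kyber_resize_domain() then clamps
 * back to its 256-token maximum.
 */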

static void kyber_timer_fn(struct timer_list *t)
{
	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
	unsigned int sched_domain;
	int cpu;
	bool bad = false;

	/* Sum all of the per-cpu latency histograms. */
	for_each_online_cpu(cpu) {
		struct kyber_cpu_latency *cpu_latency;

		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_TOTAL_LATENCY);
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_IO_LATENCY);
		}
	}

	/*
	 * Check if any domains have a high I/O latency, which might indicate
	 * congestion in the device. Note that we use the p90; we don't want to
	 * be too sensitive to outliers here.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		int p90;

		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
					   90);
		if (p90 >= KYBER_GOOD_BUCKETS)
			bad = true;
	}

	/*
	 * Adjust the scheduling domain depths. If we determined that there was
	 * congestion, we throttle all domains with good latencies. Either way,
	 * we ease up on throttling domains with bad latencies.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		unsigned int orig_depth, depth;
		int p99;

		p99 = calculate_percentile(kqd, sched_domain,
					   KYBER_TOTAL_LATENCY, 99);
		/*
		 * This is kind of subtle: different domains will not
		 * necessarily have enough samples to calculate the latency
		 * percentiles during the same window, so we have to remember
		 * the p99 for the next time we observe congestion; once we do,
		 * we don't want to throttle again until we get more data, so we
		 * reset it to -1.
		 */
		if (bad) {
			if (p99 < 0)
				p99 = kqd->domain_p99[sched_domain];
			kqd->domain_p99[sched_domain] = -1;
		} else if (p99 >= 0) {
			kqd->domain_p99[sched_domain] = p99;
		}
		if (p99 < 0)
			continue;

		/*
		 * If this domain has bad latency, throttle less. Otherwise,
		 * throttle more iff we determined that there is congestion.
		 *
		 * The new depth is scaled linearly with the p99 latency vs the
		 * latency target. E.g., if the p99 is 3/4 of the target, then
		 * we throttle down to 3/4 of the current depth, and if the p99
		 * is 2x the target, then we double the depth.
		 */
		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
			kyber_resize_domain(kqd, sched_domain, depth);
		}
	}
}

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	int ret = -ENOMEM;
	int i;

	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;

	kqd->q = q;

	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
					    GFP_KERNEL | __GFP_ZERO);
	if (!kqd->cpu_latency)
		goto err_kqd;

	timer_setup(&kqd->timer, kyber_timer_fn, 0);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_buckets;
		}
	}

	for (i = 0; i < KYBER_OTHER; i++) {
		kqd->domain_p99[i] = -1;
		kqd->latency_targets[i] = kyber_latency_targets[i];
	}

	return kqd;

err_buckets:
	free_percpu(kqd->cpu_latency);
err_kqd:
	kfree(kqd);
err:
	return ERR_PTR(ret);
}

static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	blk_stat_enable_accounting(q);

	eq->elevator_data = kqd;
	q->elevator = eq;

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	del_timer_sync(&kqd->timer);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	free_percpu(kqd->cpu_latency);
	kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
	unsigned int i;

	spin_lock_init(&kcq->lock);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		INIT_LIST_HEAD(&kcq->rq_list[i]);
}
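
/*
 * Added commentary (not in the original source): async_depth below is the
 * per-word "shallow" limit passed to sbitmap_get_shallow() via
 * data->shallow_depth in kyber_limit_depth(). For instance, if the scheduler
 * tag bitmap uses fully packed 64-bit words (shift == 6), async requests are
 * limited to 64 * 75 / 100 = 48 tags per word, leaving the remainder for
 * synchronous I/O.
 */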

static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags->sb.shift;

	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
}

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
				       sizeof(struct kyber_ctx_queue),
				       GFP_KERNEL, hctx->numa_node);
	if (!khd->kcqs)
		goto err_khd;

	for (i = 0; i < hctx->nr_ctx; i++)
		kyber_ctx_queue_init(&khd->kcqs[i]);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
				      ilog2(8), GFP_KERNEL, hctx->numa_node,
				      false, false)) {
			while (--i >= 0)
				sbitmap_free(&khd->kcq_map[i]);
			goto err_kcqs;
		}
	}

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		khd->domain_wait[i].sbq = NULL;
		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
					  kyber_domain_wake);
		khd->domain_wait[i].wait.private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	kyber_depth_updated(hctx);

	return 0;

err_kcqs:
	kfree(khd->kcqs);
err_khd:
	kfree(khd);
	return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_free(&khd->kcq_map[i]);
	kfree(khd->kcqs);
	kfree(hctx->sched_data);
}
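
/*
 * Added commentary (not in the original source): each dispatched request
 * holds one token from its domain's sbitmap_queue, which is how Kyber
 * enforces the per-domain depths chosen by kyber_timer_fn(). The token number
 * is stashed in rq->elv.priv[0] (-1 means "no token") and is released again
 * when the request completes or is requeued.
 */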

static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = kyber_sched_domain(rq->cmd_flags);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
	 */
	if (!op_is_sync(op)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
			    unsigned int nr_segs)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
	bool merged;

	spin_lock(&kcq->lock);
	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
	spin_unlock(&kcq->lock);

	return merged;
}

static void kyber_prepare_request(struct request *rq)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list, bool at_head)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq, *next;

	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
		struct list_head *head = &kcq->rq_list[sched_domain];

		spin_lock(&kcq->lock);
		if (at_head)
			list_move(&rq->queuelist, head);
		else
			list_move_tail(&rq->queuelist, head);
		sbitmap_set_bit(&khd->kcq_map[sched_domain],
				rq->mq_ctx->index_hw[hctx->type]);
		trace_block_rq_insert(rq);
		spin_unlock(&kcq->lock);
	}
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}

static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{
	unsigned int bucket;
	u64 divisor;

	if (latency > 0) {
		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
			       KYBER_LATENCY_BUCKETS - 1);
	} else {
		bucket = 0;
	}

	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}
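
/*
 * Added commentary (not in the original source): on completion we bucket both
 * the total latency (measured from request allocation) and the I/O latency
 * (measured from when the request was issued to the device), then use
 * timer_reduce() so the aggregation timer fires within roughly 100 ms (HZ/10)
 * without pushing out an already-earlier expiry.
 */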

static void kyber_completed_request(struct request *rq, u64 now)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
	struct kyber_cpu_latency *cpu_latency;
	unsigned int sched_domain;
	u64 target;

	sched_domain = kyber_sched_domain(rq->cmd_flags);
	if (sched_domain == KYBER_OTHER)
		return;

	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
	target = kqd->latency_targets[sched_domain];
	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
			   target, now - rq->start_time_ns);
	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
			   now - rq->io_start_time_ns);
	put_cpu_ptr(kqd->cpu_latency);

	timer_reduce(&kqd->timer, jiffies + HZ / 10);
}

struct flush_kcq_data {
	struct kyber_hctx_data *khd;
	unsigned int sched_domain;
	struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_kcq_data *flush_data = data;
	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

	spin_lock(&kcq->lock);
	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
			      flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&kcq->lock);

	return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{
	struct flush_kcq_data data = {
		.khd = khd,
		.sched_domain = sched_domain,
		.list = list,
	};

	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
			     flush_busy_kcq, &data);
}

static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	sbitmap_del_wait_queue(wait);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/*
		 * Try again in case a token was freed before we got on the wait
		 * queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock, but
	 * list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}

#define KYBER_LAT_SHOW_STORE(domain, name)				\
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
				       char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
}									\
									\
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
					const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->latency_targets[domain] = nsec;				\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR
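
/*
 * Usage note (added commentary, not in the original source): the read and
 * write latency targets are tunable at runtime through the read_lat_nsec and
 * write_lat_nsec attributes above, typically exposed under
 * /sys/block/<dev>/queue/iosched/ once kyber is the active scheduler, e.g.:
 *
 *	echo 1000000 > /sys/block/nvme0n1/queue/iosched/read_lat_nsec
 *
 * The device name and value here are purely illustrative.
 */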

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start	= kyber_##name##_rqs_start,				\
	.next	= kyber_##name##_rqs_next,				\
	.stop	= kyber_##name##_rqs_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif
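
/*
 * Usage note (added commentary, not in the original source): the scheduler
 * registers itself under the name "kyber" and can typically be selected per
 * device at runtime, e.g.:
 *
 *	echo kyber > /sys/block/<dev>/queue/scheduler
 */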

static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
		.depth_updated = kyber_depth_updated,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_features = ELEVATOR_F_MQ_AWARE,
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");