// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup-rwstat.h"
#include "blk-stat.h"
#include "blk-throttle.h"

/* Max dispatch from a group in 1 round */
#define THROTL_GRP_QUANTUM 8

/* Total max dispatch from all groups in one round */
#define THROTL_QUANTUM 32

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * For HD, very small latency comes from sequential IO. Such IO doesn't help
 * determine whether the IO is impacted by others, hence we ignore it.
 */
#define LATENCY_FILTERED_HD (1000L) /* 1ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
        unsigned long total_latency; /* ns / 1024 */
        int samples;
};

struct avg_latency_bucket {
        unsigned long latency; /* ns / 1024 */
        bool valid;
};

struct throtl_data
{
        /* service tree for active throtl groups */
        struct throtl_service_queue service_queue;

        struct request_queue *queue;

        /* Total Number of queued bios on READ and WRITE lists */
        unsigned int nr_queued[2];

        unsigned int throtl_slice;

        /* Work for dispatching throttled bios */
        struct work_struct dispatch_work;
        unsigned int limit_index;
        bool limit_valid[LIMIT_CNT];

        unsigned long low_upgrade_time;
        unsigned long low_downgrade_time;

        unsigned int scale;

        struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
        struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
        struct latency_bucket __percpu *latency_buckets[2];
        unsigned long last_calculate_time;
        unsigned long filtered_latency;

        bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
        return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
        if (sq && sq->parent_sq)
                return container_of(sq, struct throtl_grp, service_queue);
        else
                return NULL;
}
/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg = sq_to_tg(sq);

        if (tg)
                return tg->td;
        else
                return container_of(sq, struct throtl_data, service_queue);
}

/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 * make the IO dispatch smoother.
 * Scale up: linearly scale up according to elapsed time since upgrade. For
 *           every throtl_slice, the limit scales up by half of the .low limit
 *           till the limit hits the .max limit.
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
        /* arbitrary value to avoid too big scale */
        if (td->scale < 4096 && time_after_eq(jiffies,
            td->low_upgrade_time + td->scale * td->throtl_slice))
                td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

        return low + (low >> 1) * td->scale;
}

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td;
        uint64_t ret;

        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return U64_MAX;

        td = tg->td;
        ret = tg->bps[rw][td->limit_index];
        if (ret == 0 && td->limit_index == LIMIT_LOW) {
                /* intermediate node or iops isn't 0 */
                if (!list_empty(&blkg->blkcg->css.children) ||
                    tg->iops[rw][td->limit_index])
                        return U64_MAX;
                else
                        return MIN_THROTL_BPS;
        }

        if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
            tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
                uint64_t adjusted;

                adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
                ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
        }
        return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td;
        unsigned int ret;

        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
                return UINT_MAX;

        td = tg->td;
        ret = tg->iops[rw][td->limit_index];
        if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
                /* intermediate node or bps isn't 0 */
                if (!list_empty(&blkg->blkcg->css.children) ||
                    tg->bps[rw][td->limit_index])
                        return UINT_MAX;
                else
                        return MIN_THROTL_IOPS;
        }

        if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
            tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
                uint64_t adjusted;

                adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
                if (adjusted > UINT_MAX)
                        adjusted = UINT_MAX;
                ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
        }
        return ret;
}

#define request_bucket_index(sectors) \
        clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
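/*
 * Example (illustrative): a 4k request is 8 sectors, so
 * order_base_2(8) - 3 = 0 selects the first bucket; a 1M request is
 * 2048 sectors, so order_base_2(2048) - 3 = 8 selects the last of the
 * LATENCY_BUCKET_SIZE (9) buckets. Sizes outside the range are clamped.
 */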
/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)    do {                            \
        struct throtl_grp *__tg = sq_to_tg((sq));                       \
        struct throtl_data *__td = sq_to_td((sq));                      \
                                                                        \
        (void)__td;                                                     \
        if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
                break;                                                  \
        if ((__tg)) {                                                   \
                blk_add_cgroup_trace_msg(__td->queue,                   \
                        &tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
        } else {                                                        \
                blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
        }                                                               \
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
        /* assume it's one sector */
        if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
                return 512;
        return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
        INIT_LIST_HEAD(&qn->node);
        bio_list_init(&qn->bios);
        qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated. See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
                                 struct list_head *queued)
{
        bio_list_add(&qn->bios, bio);
        if (list_empty(&qn->node)) {
                list_add_tail(&qn->node, queued);
                blkg_get(tg_to_blkg(qn->tg));
        }
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
        struct throtl_qnode *qn;
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        qn = list_first_entry(queued, struct throtl_qnode, node);
        bio = bio_list_peek(&qn->bios);
        WARN_ON_ONCE(!bio);
        return bio;
}
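/*
 * Example (illustrative): if child groups A and B each have three bios
 * queued on the parent via their own qnodes, repeated popping yields
 * A1 B1 A2 B2 A3 B3 rather than A1 A2 A3 B1 B2 B3, because a still
 * non-empty qnode is rotated to the tail after each pop (see below).
 */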
/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued. After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too. If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
                                     struct throtl_grp **tg_to_put)
{
        struct throtl_qnode *qn;
        struct bio *bio;

        if (list_empty(queued))
                return NULL;

        qn = list_first_entry(queued, struct throtl_qnode, node);
        bio = bio_list_pop(&qn->bios);
        WARN_ON_ONCE(!bio);

        if (bio_list_empty(&qn->bios)) {
                list_del_init(&qn->node);
                if (tg_to_put)
                        *tg_to_put = qn->tg;
                else
                        blkg_put(tg_to_blkg(qn->tg));
        } else {
                list_move_tail(&qn->node, queued);
        }

        return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
        INIT_LIST_HEAD(&sq->queued[READ]);
        INIT_LIST_HEAD(&sq->queued[WRITE]);
        sq->pending_tree = RB_ROOT_CACHED;
        timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
                struct blkcg *blkcg, gfp_t gfp)
{
        struct throtl_grp *tg;
        int rw;

        tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
        if (!tg)
                return NULL;

        if (blkg_rwstat_init(&tg->stat_bytes, gfp))
                goto err_free_tg;

        if (blkg_rwstat_init(&tg->stat_ios, gfp))
                goto err_exit_stat_bytes;

        throtl_service_queue_init(&tg->service_queue);

        for (rw = READ; rw <= WRITE; rw++) {
                throtl_qnode_init(&tg->qnode_on_self[rw], tg);
                throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
        }

        RB_CLEAR_NODE(&tg->rb_node);
        tg->bps[READ][LIMIT_MAX] = U64_MAX;
        tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
        tg->iops[READ][LIMIT_MAX] = UINT_MAX;
        tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
        tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
        tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
        tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
        tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
        /* LIMIT_LOW will have default value 0 */

        tg->latency_target = DFL_LATENCY_TARGET;
        tg->latency_target_conf = DFL_LATENCY_TARGET;
        tg->idletime_threshold = DFL_IDLE_THRESHOLD;
        tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

        return &tg->pd;

err_exit_stat_bytes:
        blkg_rwstat_exit(&tg->stat_bytes);
err_free_tg:
        kfree(tg);
        return NULL;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        struct blkcg_gq *blkg = tg_to_blkg(tg);
        struct throtl_data *td = blkg->q->td;
        struct throtl_service_queue *sq = &tg->service_queue;

        /*
         * If on the default hierarchy, we switch to properly hierarchical
         * behavior where limits on a given throtl_grp are applied to the
         * whole subtree rather than just the group itself. e.g. If 16M
         * read_bps limit is set on a parent group, summary bps of
         * parent group and its subtree groups can't exceed 16M for the
         * device.
         *
         * If not on the default hierarchy, the broken flat hierarchy
         * behavior is retained where all throtl_grps are treated as if
         * they're all separate root groups right below throtl_data.
         * Limits of a group don't interact with limits of other groups
         * regardless of the position of the group in the hierarchy.
         */
        sq->parent_sq = &td->service_queue;
        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
        tg->td = td;
}
/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
        struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
        struct throtl_data *td = tg->td;
        int rw;

        for (rw = READ; rw <= WRITE; rw++) {
                tg->has_rules_iops[rw] =
                        (parent_tg && parent_tg->has_rules_iops[rw]) ||
                        (td->limit_valid[td->limit_index] &&
                         tg_iops_limit(tg, rw) != UINT_MAX);
                tg->has_rules_bps[rw] =
                        (parent_tg && parent_tg->has_rules_bps[rw]) ||
                        (td->limit_valid[td->limit_index] &&
                         (tg_bps_limit(tg, rw) != U64_MAX));
        }
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        /*
         * We don't want new groups to escape the limits of their ancestors.
         * Update has_rules[] after a new group is brought online.
         */
        tg_update_has_rules(tg);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;
        bool low_valid = false;

        rcu_read_lock();
        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
                struct throtl_grp *tg = blkg_to_tg(blkg);

                if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
                    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
                        low_valid = true;
                        break;
                }
        }
        rcu_read_unlock();

        td->limit_valid[LIMIT_LOW] = low_valid;
}
#else
static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
{
}
#endif

static void throtl_upgrade_state(struct throtl_data *td);

static void throtl_pd_offline(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        tg->bps[READ][LIMIT_LOW] = 0;
        tg->bps[WRITE][LIMIT_LOW] = 0;
        tg->iops[READ][LIMIT_LOW] = 0;
        tg->iops[WRITE][LIMIT_LOW] = 0;

        blk_throtl_update_limit_valid(tg->td);

        if (!tg->td->limit_valid[tg->td->limit_index])
                throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
        struct throtl_grp *tg = pd_to_tg(pd);

        del_timer_sync(&tg->service_queue.pending_timer);
        blkg_rwstat_exit(&tg->stat_bytes);
        blkg_rwstat_exit(&tg->stat_ios);
        kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
        struct rb_node *n;

        n = rb_first_cached(&parent_sq->pending_tree);
        WARN_ON_ONCE(!n);
        if (!n)
                return NULL;
        return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
                            struct throtl_service_queue *parent_sq)
{
        rb_erase_cached(n, &parent_sq->pending_tree);
        RB_CLEAR_NODE(n);
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
        struct throtl_grp *tg;

        tg = throtl_rb_first(parent_sq);
        if (!tg)
                return;

        parent_sq->first_pending_disptime = tg->disptime;
}
static void tg_service_queue_add(struct throtl_grp *tg)
{
        struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
        struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct throtl_grp *__tg;
        unsigned long key = tg->disptime;
        bool leftmost = true;

        while (*node != NULL) {
                parent = *node;
                __tg = rb_entry_tg(parent);

                if (time_before(key, __tg->disptime))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&tg->rb_node, parent, node);
        rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
                               leftmost);
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
        if (!(tg->flags & THROTL_TG_PENDING)) {
                tg_service_queue_add(tg);
                tg->flags |= THROTL_TG_PENDING;
                tg->service_queue.parent_sq->nr_pending++;
        }
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
        if (tg->flags & THROTL_TG_PENDING) {
                struct throtl_service_queue *parent_sq =
                        tg->service_queue.parent_sq;

                throtl_rb_erase(&tg->rb_node, parent_sq);
                --parent_sq->nr_pending;
                tg->flags &= ~THROTL_TG_PENDING;
        }
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
                                          unsigned long expires)
{
        unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

        /*
         * Since we are adjusting the throttle limit dynamically, the sleep
         * time calculated according to previous limit might be invalid. It's
         * possible the cgroup sleep time is very long and no other cgroups
         * have IO running so notify the limit changes. Make sure the cgroup
         * doesn't sleep too long to avoid the missed notification.
         */
        if (time_after(expires, max_expire))
                expires = max_expire;
        mod_timer(&sq->pending_timer, expires);
        throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
                   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child. Returns %true if either timer
 * is armed or there's no pending child left. %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true. This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally. Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
                                          bool force)
{
        /* any pending children left? */
        if (!sq->nr_pending)
                return true;

        update_min_dispatch_time(sq);

        /* is the next dispatch time in the future? */
        if (force || time_after(sq->first_pending_disptime, jiffies)) {
                throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
                return true;
        }

        /* tell the caller to continue dispatching */
        return false;
}
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
                                        bool rw, unsigned long start)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->carryover_bytes[rw] = 0;
        tg->carryover_ios[rw] = 0;

        /*
         * Previous slice has expired. We must have trimmed it after last
         * bio dispatch. That means since start of last slice, we never used
         * that bandwidth. Let's try to make use of that bandwidth while
         * giving credit.
         */
        if (time_after(start, tg->slice_start[rw]))
                tg->slice_start[rw] = start;

        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
        throtl_log(&tg->service_queue,
                   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
                                          bool clear_carryover)
{
        tg->bytes_disp[rw] = 0;
        tg->io_disp[rw] = 0;
        tg->slice_start[rw] = jiffies;
        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
        if (clear_carryover) {
                tg->carryover_bytes[rw] = 0;
                tg->carryover_ios[rw] = 0;
        }

        throtl_log(&tg->service_queue,
                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
                                        unsigned long jiffy_end)
{
        tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
                                       unsigned long jiffy_end)
{
        throtl_set_slice_end(tg, rw, jiffy_end);
        throtl_log(&tg->service_queue,
                   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
                   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
                return false;

        return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
        unsigned long nr_slices, time_elapsed, io_trim;
        u64 bytes_trim, tmp;

        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

        /*
         * If bps are unlimited (-1), then time slices don't get
         * renewed. Don't try to trim the slice if slice is used. A new
         * slice will start when appropriate.
         */
        if (throtl_slice_used(tg, rw))
                return;

        /*
         * A bio has been dispatched. Also adjust slice_end. It might happen
         * that initially cgroup limit was very low resulting in high
         * slice_end, but later limit was bumped up and bio was dispatched
         * sooner, then we need to reduce slice_end. A high bogus slice_end
         * is bad because it does not allow new slice to start.
         */

        throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

        time_elapsed = jiffies - tg->slice_start[rw];

        nr_slices = time_elapsed / tg->td->throtl_slice;

        if (!nr_slices)
                return;
        tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
        do_div(tmp, HZ);
        bytes_trim = tmp;

        io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
                  HZ;

        if (!bytes_trim && !io_trim)
                return;

        if (tg->bytes_disp[rw] >= bytes_trim)
                tg->bytes_disp[rw] -= bytes_trim;
        else
                tg->bytes_disp[rw] = 0;

        if (tg->io_disp[rw] >= io_trim)
                tg->io_disp[rw] -= io_trim;
        else
                tg->io_disp[rw] = 0;

        tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

        throtl_log(&tg->service_queue,
                   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
                   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
                   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
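/*
 * Example (illustrative): with HZ = 1000, throtl_slice = HZ / 10 and a
 * 1 MiB/s bps limit, each fully elapsed slice trims
 * 1048576 * 100 / 1000 = ~104857 bytes from bytes_disp, and slice_start
 * advances by nr_slices * throtl_slice so unused budget from old slices
 * doesn't accumulate.
 */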
static unsigned int calculate_io_allowed(u32 iops_limit,
                                         unsigned long jiffy_elapsed)
{
        unsigned int io_allowed;
        u64 tmp;

        /*
         * jiffy_elapsed should not be a big value as minimum iops can be
         * 1 then at max jiffy elapsed should be equivalent of 1 second as we
         * will allow dispatch after 1 second and after that slice should
         * have been trimmed.
         */

        tmp = (u64)iops_limit * jiffy_elapsed;
        do_div(tmp, HZ);

        if (tmp > UINT_MAX)
                io_allowed = UINT_MAX;
        else
                io_allowed = tmp;

        return io_allowed;
}

static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
        return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}

static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
{
        unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
        u64 bps_limit = tg_bps_limit(tg, rw);
        u32 iops_limit = tg_iops_limit(tg, rw);

        /*
         * If config is updated while bios are still throttled, calculate and
         * accumulate how many bytes/ios have been waited for across changes.
         * carryover_bytes/ios will then be used to calculate the new wait
         * time under the new configuration.
         */
        if (bps_limit != U64_MAX)
                tg->carryover_bytes[rw] +=
                        calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
                        tg->bytes_disp[rw];
        if (iops_limit != UINT_MAX)
                tg->carryover_ios[rw] +=
                        calculate_io_allowed(iops_limit, jiffy_elapsed) -
                        tg->io_disp[rw];
}

static void tg_update_carryover(struct throtl_grp *tg)
{
        if (tg->service_queue.nr_queued[READ])
                __tg_update_carryover(tg, READ);
        if (tg->service_queue.nr_queued[WRITE])
                __tg_update_carryover(tg, WRITE);

        /* see comments in struct throtl_grp for meaning of these fields. */
        throtl_log(&tg->service_queue, "%s: %llu %llu %u %u\n", __func__,
                   tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
                   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
}

static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
                                          u32 iops_limit)
{
        bool rw = bio_data_dir(bio);
        unsigned int io_allowed;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

        if (iops_limit == UINT_MAX)
                return 0;

        jiffy_elapsed = jiffies - tg->slice_start[rw];

        /* Round up to the next throttle slice, wait time must be nonzero */
        jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
        io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
                     tg->carryover_ios[rw];
        if (tg->io_disp[rw] + 1 <= io_allowed)
                return 0;

        /* Calc approx time to dispatch */
        jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
        return jiffy_wait;
}
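/*
 * Example (illustrative): with HZ = 1000 and throtl_slice = 100, a bio
 * arriving 30 jiffies into the slice sees jiffy_elapsed_rnd =
 * roundup(31, 100) = 100; if the io budget for that window is already
 * spent, the returned wait is 100 - 30 = 70 jiffies, i.e. until the
 * next slice boundary.
 */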
static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
                                         u64 bps_limit)
{
        bool rw = bio_data_dir(bio);
        u64 bytes_allowed, extra_bytes;
        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
        unsigned int bio_size = throtl_bio_data_size(bio);

        /* no need to throttle if this bio's bytes have been accounted */
        if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED))
                return 0;

        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

        /* Slice has just started. Consider one slice interval */
        if (!jiffy_elapsed)
                jiffy_elapsed_rnd = tg->td->throtl_slice;

        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
        bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
                        tg->carryover_bytes[rw];
        if (tg->bytes_disp[rw] + bio_size <= bytes_allowed)
                return 0;

        /* Calc approx time to dispatch */
        extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
        jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);

        if (!jiffy_wait)
                jiffy_wait = 1;

        /*
         * This wait time is without taking into consideration the rounding
         * up we did. Add that time also.
         */
        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
        return jiffy_wait;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
                            unsigned long *wait)
{
        bool rw = bio_data_dir(bio);
        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
        u64 bps_limit = tg_bps_limit(tg, rw);
        u32 iops_limit = tg_iops_limit(tg, rw);

        /*
         * Currently whole state machine of group depends on first bio
         * queued in the group bio list. So one should not be calling
         * this function with a different bio if there are other bios
         * queued.
         */
        BUG_ON(tg->service_queue.nr_queued[rw] &&
               bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

        /* If tg->bps = -1, then BW is unlimited */
        if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
            tg->flags & THROTL_TG_CANCELING) {
                if (wait)
                        *wait = 0;
                return true;
        }

        /*
         * If previous slice expired, start a new one otherwise renew/extend
         * existing slice to make sure it is at least throtl_slice interval
         * long since now. New slice is started only for empty throttle group.
         * If there is queued bio, that means there should be an active
         * slice and it should be extended instead.
         */
        if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
                throtl_start_new_slice(tg, rw, true);
        else {
                if (time_before(tg->slice_end[rw],
                    jiffies + tg->td->throtl_slice))
                        throtl_extend_slice(tg, rw,
                                jiffies + tg->td->throtl_slice);
        }

        bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
        iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
        if (bps_wait + iops_wait == 0) {
                if (wait)
                        *wait = 0;
                return true;
        }

        max_wait = max(bps_wait, iops_wait);

        if (wait)
                *wait = max_wait;

        if (time_before(tg->slice_end[rw], jiffies + max_wait))
                throtl_extend_slice(tg, rw, jiffies + max_wait);

        return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
        bool rw = bio_data_dir(bio);
        unsigned int bio_size = throtl_bio_data_size(bio);

        /* Charge the bio to the group */
        if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
                tg->bytes_disp[rw] += bio_size;
                tg->last_bytes_disp[rw] += bio_size;
        }

        tg->io_disp[rw]++;
        tg->last_io_disp[rw]++;
}
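/*
 * Note: BIO_BPS_THROTTLED is set in tg_dispatch_one_bio() when a
 * throttled bio reaches the top-level service queue and is about to be
 * resubmitted; the two flag checks above then skip bps accounting so the
 * bio's bytes are not charged a second time on its next pass through the
 * throttle. iops are charged unconditionally.
 */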
/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
                              struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        bool rw = bio_data_dir(bio);

        if (!qn)
                qn = &tg->qnode_on_self[rw];

        /*
         * If @tg doesn't currently have any bios queued in the same
         * direction, queueing @bio can change when @tg should be
         * dispatched. Mark that @tg was empty. This is automatically
         * cleared on the next tg_update_disptime().
         */
        if (!sq->nr_queued[rw])
                tg->flags |= THROTL_TG_WAS_EMPTY;

        throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

        sq->nr_queued[rw]++;
        throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
        struct bio *bio;

        bio = throtl_peek_queued(&sq->queued[READ]);
        if (bio)
                tg_may_dispatch(tg, bio, &read_wait);

        bio = throtl_peek_queued(&sq->queued[WRITE]);
        if (bio)
                tg_may_dispatch(tg, bio, &write_wait);

        min_wait = min(read_wait, write_wait);
        disptime = jiffies + min_wait;

        /* Update dispatch time */
        throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
        tg->disptime = disptime;
        tg_service_queue_add(tg);

        /* see throtl_add_bio_tg() */
        tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
                                           struct throtl_grp *parent_tg, bool rw)
{
        if (throtl_slice_used(parent_tg, rw)) {
                throtl_start_new_slice_with_credit(parent_tg, rw,
                                child_tg->slice_start[rw]);
        }
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct throtl_service_queue *parent_sq = sq->parent_sq;
        struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
        struct throtl_grp *tg_to_put = NULL;
        struct bio *bio;

        /*
         * @bio is being transferred from @tg to @parent_sq. Popping a bio
         * from @tg may put its reference and @parent_sq might end up
         * getting released prematurely. Remember the tg to put and put it
         * after @bio is transferred to @parent_sq.
         */
        bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
        sq->nr_queued[rw]--;

        throtl_charge_bio(tg, bio);

        /*
         * If our parent is another tg, we just need to transfer @bio to
         * the parent using throtl_add_bio_tg(). If our parent is
         * @td->service_queue, @bio is ready to be issued. Put it on its
         * bio_lists[] and decrease total number queued. The caller is
         * responsible for issuing these bios.
         */
        if (parent_tg) {
                throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
                start_parent_slice_with_credit(tg, parent_tg, rw);
        } else {
                bio_set_flag(bio, BIO_BPS_THROTTLED);
                throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
                                     &parent_sq->queued[rw]);
                BUG_ON(tg->td->nr_queued[rw] <= 0);
                tg->td->nr_queued[rw]--;
        }

        throtl_trim_slice(tg, rw);

        if (tg_to_put)
                blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        unsigned int nr_reads = 0, nr_writes = 0;
        unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
        unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
        struct bio *bio;

        /* Try to dispatch 75% READS and 25% WRITES */

        while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio));
                nr_reads++;

                if (nr_reads >= max_nr_reads)
                        break;
        }

        while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
               tg_may_dispatch(tg, bio, NULL)) {

                tg_dispatch_one_bio(tg, bio_data_dir(bio));
                nr_writes++;

                if (nr_writes >= max_nr_writes)
                        break;
        }

        return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
        unsigned int nr_disp = 0;

        while (1) {
                struct throtl_grp *tg;
                struct throtl_service_queue *sq;

                if (!parent_sq->nr_pending)
                        break;

                tg = throtl_rb_first(parent_sq);
                if (!tg)
                        break;

                if (time_before(jiffies, tg->disptime))
                        break;

                nr_disp += throtl_dispatch_tg(tg);

                sq = &tg->service_queue;
                if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
                        tg_update_disptime(tg);
                else
                        throtl_dequeue_tg(tg);

                if (nr_disp >= THROTL_QUANTUM)
                        break;
        }

        return nr_disp;
}
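/*
 * Example (illustrative): with THROTL_GRP_QUANTUM = 8, each group may
 * dispatch at most 8 * 3 / 4 = 6 reads and 8 - 6 = 2 writes per round,
 * and a round over the whole tree stops once THROTL_QUANTUM (32) bios
 * have been dispatched.
 */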
static bool throtl_can_upgrade(struct throtl_data *td,
                               struct throtl_grp *this_tg);

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched. This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly. If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
        struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
        struct throtl_grp *tg = sq_to_tg(sq);
        struct throtl_data *td = sq_to_td(sq);
        struct throtl_service_queue *parent_sq;
        struct request_queue *q;
        bool dispatched;
        int ret;

        /* throtl_data may be gone, so figure out request queue by blkg */
        if (tg)
                q = tg->pd.blkg->q;
        else
                q = td->queue;

        spin_lock_irq(&q->queue_lock);

        if (!q->root_blkg)
                goto out_unlock;

        if (throtl_can_upgrade(td, NULL))
                throtl_upgrade_state(td);

again:
        parent_sq = sq->parent_sq;
        dispatched = false;

        while (true) {
                throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
                           sq->nr_queued[READ] + sq->nr_queued[WRITE],
                           sq->nr_queued[READ], sq->nr_queued[WRITE]);

                ret = throtl_select_dispatch(sq);
                if (ret) {
                        throtl_log(sq, "bios disp=%u", ret);
                        dispatched = true;
                }

                if (throtl_schedule_next_dispatch(sq, false))
                        break;

                /* this dispatch window is still open, relax and repeat */
                spin_unlock_irq(&q->queue_lock);
                cpu_relax();
                spin_lock_irq(&q->queue_lock);
        }

        if (!dispatched)
                goto out_unlock;

        if (parent_sq) {
                /* @parent_sq is another throtl_grp, propagate dispatch */
                if (tg->flags & THROTL_TG_WAS_EMPTY) {
                        tg_update_disptime(tg);
                        if (!throtl_schedule_next_dispatch(parent_sq, false)) {
                                /* window is already open, repeat dispatching */
                                sq = parent_sq;
                                tg = sq_to_tg(sq);
                                goto again;
                        }
                }
        } else {
                /* reached the top-level, queue issuing */
                queue_work(kthrotld_workqueue, &td->dispatch_work);
        }
out_unlock:
        spin_unlock_irq(&q->queue_lock);
}
/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue. Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
        struct throtl_data *td = container_of(work, struct throtl_data,
                                              dispatch_work);
        struct throtl_service_queue *td_sq = &td->service_queue;
        struct request_queue *q = td->queue;
        struct bio_list bio_list_on_stack;
        struct bio *bio;
        struct blk_plug plug;
        int rw;

        bio_list_init(&bio_list_on_stack);

        spin_lock_irq(&q->queue_lock);
        for (rw = READ; rw <= WRITE; rw++)
                while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
                        bio_list_add(&bio_list_on_stack, bio);
        spin_unlock_irq(&q->queue_lock);

        if (!bio_list_empty(&bio_list_on_stack)) {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&bio_list_on_stack)))
                        submit_bio_noacct_nocheck(bio);
                blk_finish_plug(&plug);
        }
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
                              int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        u64 v = *(u64 *)((void *)tg + off);

        if (v == U64_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        unsigned int v = *(unsigned int *)((void *)tg + off);

        if (v == UINT_MAX)
                return 0;
        return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}
static void tg_conf_updated(struct throtl_grp *tg, bool global)
{
        struct throtl_service_queue *sq = &tg->service_queue;
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        throtl_log(&tg->service_queue,
                   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
                   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
                   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

        /*
         * Update has_rules[] flags for the updated tg's subtree. A tg is
         * considered to have rules if either the tg itself or any of its
         * ancestors has rules. This identifies groups without any
         * restrictions in the whole hierarchy and allows them to bypass
         * blk-throttle.
         */
        blkg_for_each_descendant_pre(blkg, pos_css,
                        global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
                struct throtl_grp *this_tg = blkg_to_tg(blkg);
                struct throtl_grp *parent_tg;

                tg_update_has_rules(this_tg);
                /* ignore root/second level */
                if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
                    !blkg->parent->parent)
                        continue;
                parent_tg = blkg_to_tg(blkg->parent);
                /*
                 * make sure all children have a lower idle time threshold
                 * and a higher latency target
                 */
                this_tg->idletime_threshold = min(this_tg->idletime_threshold,
                                parent_tg->idletime_threshold);
                this_tg->latency_target = max(this_tg->latency_target,
                                parent_tg->latency_target);
        }

        /*
         * We're already holding queue_lock and know @tg is valid. Let's
         * apply the new config directly.
         *
         * Restart the slices for both READ and WRITE. It might happen
         * that a group's limits are dropped suddenly and we don't want to
         * account recently dispatched IO with the new low rate.
         */
        throtl_start_new_slice(tg, READ, false);
        throtl_start_new_slice(tg, WRITE, false);

        if (tg->flags & THROTL_TG_PENDING) {
                tg_update_disptime(tg);
                throtl_schedule_next_dispatch(sq->parent_sq, true);
        }
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
                           char *buf, size_t nbytes, loff_t off, bool is_u64)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        int ret;
        u64 v;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        ret = -EINVAL;
        if (sscanf(ctx.body, "%llu", &v) != 1)
                goto out_finish;
        if (!v)
                v = U64_MAX;

        tg = blkg_to_tg(ctx.blkg);
        tg_update_carryover(tg);

        if (is_u64)
                *(u64 *)((void *)tg + of_cft(of)->private) = v;
        else
                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;

        tg_conf_updated(tg, false);
        ret = 0;
out_finish:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
                               char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
{
        return tg_set_conf(of, buf, nbytes, off, false);
}
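/*
 * Example (cgroup v1 interface, see
 * Documentation/admin-guide/cgroup-v1/blkio-controller.rst): limits are
 * written as "MAJ:MIN VALUE", with 0 meaning "no limit", e.g.
 *   echo "8:16 1048576" > blkio.throttle.read_bps_device
 * caps reads on device 8:16 at 1 MiB/s.
 */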
static int tg_print_rwstat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat, &blkcg_policy_throtl,
                          seq_cft(sf)->private, true);
        return 0;
}

static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat_sample sum;

        blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
                                  &sum);
        return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
                          seq_cft(sf)->private, true);
        return 0;
}

static struct cftype throtl_legacy_files[] = {
        {
                .name = "throttle.read_bps_device",
                .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
                .seq_show = tg_print_conf_u64,
                .write = tg_set_conf_u64,
        },
        {
                .name = "throttle.write_bps_device",
                .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
                .seq_show = tg_print_conf_u64,
                .write = tg_set_conf_u64,
        },
        {
                .name = "throttle.read_iops_device",
                .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
                .seq_show = tg_print_conf_uint,
                .write = tg_set_conf_uint,
        },
        {
                .name = "throttle.write_iops_device",
                .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
                .seq_show = tg_print_conf_uint,
                .write = tg_set_conf_uint,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = offsetof(struct throtl_grp, stat_bytes),
                .seq_show = tg_print_rwstat,
        },
        {
                .name = "throttle.io_service_bytes_recursive",
                .private = offsetof(struct throtl_grp, stat_bytes),
                .seq_show = tg_print_rwstat_recursive,
        },
        {
                .name = "throttle.io_serviced",
                .private = offsetof(struct throtl_grp, stat_ios),
                .seq_show = tg_print_rwstat,
        },
        {
                .name = "throttle.io_serviced_recursive",
                .private = offsetof(struct throtl_grp, stat_ios),
                .seq_show = tg_print_rwstat_recursive,
        },
        { }     /* terminate */
};

static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
                           int off)
{
        struct throtl_grp *tg = pd_to_tg(pd);
        const char *dname = blkg_dev_name(pd->blkg);
        char bufs[4][21] = { "max", "max", "max", "max" };
        u64 bps_dft;
        unsigned int iops_dft;
        char idle_time[26] = "";
        char latency_time[26] = "";

        if (!dname)
                return 0;

        if (off == LIMIT_LOW) {
                bps_dft = 0;
                iops_dft = 0;
        } else {
                bps_dft = U64_MAX;
                iops_dft = UINT_MAX;
        }

        if (tg->bps_conf[READ][off] == bps_dft &&
            tg->bps_conf[WRITE][off] == bps_dft &&
            tg->iops_conf[READ][off] == iops_dft &&
            tg->iops_conf[WRITE][off] == iops_dft &&
            (off != LIMIT_LOW ||
             (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
              tg->latency_target_conf == DFL_LATENCY_TARGET)))
                return 0;

        if (tg->bps_conf[READ][off] != U64_MAX)
                snprintf(bufs[0], sizeof(bufs[0]), "%llu",
                        tg->bps_conf[READ][off]);
        if (tg->bps_conf[WRITE][off] != U64_MAX)
                snprintf(bufs[1], sizeof(bufs[1]), "%llu",
                        tg->bps_conf[WRITE][off]);
        if (tg->iops_conf[READ][off] != UINT_MAX)
                snprintf(bufs[2], sizeof(bufs[2]), "%u",
                        tg->iops_conf[READ][off]);
        if (tg->iops_conf[WRITE][off] != UINT_MAX)
                snprintf(bufs[3], sizeof(bufs[3]), "%u",
                        tg->iops_conf[WRITE][off]);
        if (off == LIMIT_LOW) {
                if (tg->idletime_threshold_conf == ULONG_MAX)
                        strcpy(idle_time, " idle=max");
                else
                        snprintf(idle_time, sizeof(idle_time), " idle=%lu",
                                tg->idletime_threshold_conf);

                if (tg->latency_target_conf == ULONG_MAX)
                        strcpy(latency_time, " latency=max");
                else
                        snprintf(latency_time, sizeof(latency_time),
                                " latency=%lu", tg->latency_target_conf);
        }

        seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
                   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
                   latency_time);
        return 0;
}

static int tg_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
        return 0;
}
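/*
 * Example (cgroup v2 "io.max"/"io.low" syntax, see
 * Documentation/admin-guide/cgroup-v2.rst): the four limits plus the
 * LIMIT_LOW-only "idle" and "latency" keys are written on one line and
 * "max" clears a limit, e.g.
 *   echo "8:16 rbps=2097152 wiops=max" > io.max
 */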
static ssize_t tg_set_limit(struct kernfs_open_file *of,
                            char *buf, size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        u64 v[4];
        unsigned long idle_time;
        unsigned long latency_time;
        int ret;
        int index = of_cft(of)->private;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
        if (ret)
                return ret;

        tg = blkg_to_tg(ctx.blkg);
        tg_update_carryover(tg);

        v[0] = tg->bps_conf[READ][index];
        v[1] = tg->bps_conf[WRITE][index];
        v[2] = tg->iops_conf[READ][index];
        v[3] = tg->iops_conf[WRITE][index];

        idle_time = tg->idletime_threshold_conf;
        latency_time = tg->latency_target_conf;
        while (true) {
                char tok[27];   /* wiops=18446744073709551615 */
                char *p;
                u64 val = U64_MAX;
                int len;

                if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
                        break;
                if (tok[0] == '\0')
                        break;
                ctx.body += len;

                ret = -EINVAL;
                p = tok;
                strsep(&p, "=");
                if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
                        goto out_finish;

                ret = -ERANGE;
                if (!val)
                        goto out_finish;

                ret = -EINVAL;
                if (!strcmp(tok, "rbps") && val > 1)
                        v[0] = val;
                else if (!strcmp(tok, "wbps") && val > 1)
                        v[1] = val;
                else if (!strcmp(tok, "riops") && val > 1)
                        v[2] = min_t(u64, val, UINT_MAX);
                else if (!strcmp(tok, "wiops") && val > 1)
                        v[3] = min_t(u64, val, UINT_MAX);
                else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
                        idle_time = val;
                else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
                        latency_time = val;
                else
                        goto out_finish;
        }

        tg->bps_conf[READ][index] = v[0];
        tg->bps_conf[WRITE][index] = v[1];
        tg->iops_conf[READ][index] = v[2];
        tg->iops_conf[WRITE][index] = v[3];

        if (index == LIMIT_MAX) {
                tg->bps[READ][index] = v[0];
                tg->bps[WRITE][index] = v[1];
                tg->iops[READ][index] = v[2];
                tg->iops[WRITE][index] = v[3];
        }
        tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
                tg->bps_conf[READ][LIMIT_MAX]);
        tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
                tg->bps_conf[WRITE][LIMIT_MAX]);
        tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
                tg->iops_conf[READ][LIMIT_MAX]);
        tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
                tg->iops_conf[WRITE][LIMIT_MAX]);
        tg->idletime_threshold_conf = idle_time;
        tg->latency_target_conf = latency_time;

        /* force user to configure all settings for low limit */
        if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
              tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
            tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
            tg->latency_target_conf == DFL_LATENCY_TARGET) {
                tg->bps[READ][LIMIT_LOW] = 0;
                tg->bps[WRITE][LIMIT_LOW] = 0;
                tg->iops[READ][LIMIT_LOW] = 0;
                tg->iops[WRITE][LIMIT_LOW] = 0;
                tg->idletime_threshold = DFL_IDLE_THRESHOLD;
                tg->latency_target = DFL_LATENCY_TARGET;
        } else if (index == LIMIT_LOW) {
                tg->idletime_threshold = tg->idletime_threshold_conf;
                tg->latency_target = tg->latency_target_conf;
        }

        blk_throtl_update_limit_valid(tg->td);
        if (tg->td->limit_valid[LIMIT_LOW]) {
                if (index == LIMIT_LOW)
                        tg->td->limit_index = LIMIT_LOW;
        } else
                tg->td->limit_index = LIMIT_MAX;
        tg_conf_updated(tg, index == LIMIT_LOW &&
                tg->td->limit_valid[LIMIT_LOW]);
        ret = 0;
out_finish:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}

static struct cftype throtl_files[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        {
                .name = "low",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = tg_print_limit,
                .write = tg_set_limit,
                .private = LIMIT_LOW,
        },
#endif
        {
                .name = "max",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = tg_print_limit,
                .write = tg_set_limit,
                .private = LIMIT_MAX,
        },
        { }     /* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
        struct throtl_data *td = q->td;

        cancel_work_sync(&td->dispatch_work);
}
struct blkcg_policy blkcg_policy_throtl = {
        .dfl_cftypes            = throtl_files,
        .legacy_cftypes         = throtl_legacy_files,

        .pd_alloc_fn            = throtl_pd_alloc,
        .pd_init_fn             = throtl_pd_init,
        .pd_online_fn           = throtl_pd_online,
        .pd_offline_fn          = throtl_pd_offline,
        .pd_free_fn             = throtl_pd_free,
};

void blk_throtl_cancel_bios(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;

        spin_lock_irq(&q->queue_lock);
        /*
         * queue_lock is held, so an rcu lock is not technically needed here.
         * However, the rcu lock is still held to emphasize that the
         * following path needs RCU protection and to prevent a warning from
         * lockdep.
         */
        rcu_read_lock();
        blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
                struct throtl_grp *tg = blkg_to_tg(blkg);
                struct throtl_service_queue *sq = &tg->service_queue;

                /*
                 * Set the flag to make sure throtl_pending_timer_fn() won't
                 * stop until all throttled bios are dispatched.
                 */
                tg->flags |= THROTL_TG_CANCELING;

                /*
                 * Do not dispatch cgroup without THROTL_TG_PENDING or cgroup
                 * will be inserted to service queue without THROTL_TG_PENDING
                 * set in tg_update_disptime below. Then IO dispatched from
                 * child in tg_dispatch_one_bio will trigger double insertion
                 * and corrupt the tree.
                 */
                if (!(tg->flags & THROTL_TG_PENDING))
                        continue;

                /*
                 * Update disptime after setting the above flag to make sure
                 * throtl_select_dispatch() won't exit without dispatching.
                 */
                tg_update_disptime(tg);

                throtl_schedule_pending_timer(sq, jiffies + 1);
        }
        rcu_read_unlock();
        spin_unlock_irq(&q->queue_lock);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{
        unsigned long rtime = jiffies, wtime = jiffies;

        if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
                rtime = tg->last_low_overflow_time[READ];
        if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
                wtime = tg->last_low_overflow_time[WRITE];
        return min(rtime, wtime);
}
static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
{
        struct throtl_service_queue *parent_sq;
        struct throtl_grp *parent = tg;
        unsigned long ret = __tg_last_low_overflow_time(tg);

        while (true) {
                parent_sq = parent->service_queue.parent_sq;
                parent = sq_to_tg(parent_sq);
                if (!parent)
                        break;

                /*
                 * The parent doesn't have a low limit, so it always reaches
                 * its low limit. Its overflow time is useless for children.
                 */
                if (!parent->bps[READ][LIMIT_LOW] &&
                    !parent->iops[READ][LIMIT_LOW] &&
                    !parent->bps[WRITE][LIMIT_LOW] &&
                    !parent->iops[WRITE][LIMIT_LOW])
                        continue;
                if (time_after(__tg_last_low_overflow_time(parent), ret))
                        ret = __tg_last_low_overflow_time(parent);
        }
        return ret;
}

static bool throtl_tg_is_idle(struct throtl_grp *tg)
{
        /*
         * cgroup is idle if:
         * - single idle is too long, longer than a fixed value (in case user
         *   configures a too big threshold) or 4 times of idletime threshold
         * - average think time is more than threshold
         * - IO latency is largely below threshold
         */
        unsigned long time;
        bool ret;

        time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
        ret = tg->latency_target == DFL_LATENCY_TARGET ||
              tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
              (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
              tg->avg_idletime > tg->idletime_threshold ||
              (tg->latency_target && tg->bio_cnt &&
               tg->bad_bio_cnt * 5 < tg->bio_cnt);
        throtl_log(&tg->service_queue,
                   "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
                   tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
                   tg->bio_cnt, ret, tg->td->scale);
        return ret;
}
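/*
 * Example (illustrative, assuming idle times are tracked in microseconds
 * as suggested by MAX_IDLE_TIME): with idletime_threshold = 1000, a
 * cgroup whose average think time exceeds 1ms, or where fewer than 1 in
 * 5 completed bios miss the latency target, is considered idle and
 * won't block an upgrade back to the LIMIT_MAX state.
 */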
static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
{
	/*
	 * A cgroup reaches its low limit when the low limits of both READ
	 * and WRITE are reached; once that happens it is ok to upgrade to
	 * the next limit.
	 */
	if (throtl_low_limit_reached(tg, READ) &&
	    throtl_low_limit_reached(tg, WRITE))
		return true;

	if (time_after_eq(jiffies,
		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
	    throtl_tg_is_idle(tg))
		return true;
	return false;
}

static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
{
	while (true) {
		if (throtl_tg_can_upgrade(tg))
			return true;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			return false;
	}
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	if (td->limit_index != LIMIT_LOW)
		return false;

	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
		return false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
			continue;
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
			continue;
		if (!throtl_hierarchy_can_upgrade(tg)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

static void throtl_upgrade_check(struct throtl_grp *tg)
{
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_LOW)
		return;

	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	tg->last_check_time = now;

	if (!time_after_eq(now,
	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
		return;

	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
}

static void throtl_upgrade_state(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&td->service_queue, "upgrade to max");
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

		tg->disptime = jiffies - 1;
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, true);
	}
	rcu_read_unlock();
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, true);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
}

static void throtl_downgrade_state(struct throtl_data *td)
{
	td->scale /= 2;

	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
	if (td->scale) {
		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
		return;
	}

	td->limit_index = LIMIT_LOW;
	td->low_downgrade_time = jiffies;
}

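/*
 * Worked example of the downgrade bookkeeping above (hypothetical
 * numbers): with scale = 4, halving gives scale = 2, and
 * low_upgrade_time is back-dated by 2 * throtl_slice. Since
 * throtl_adjusted_limit() recomputes the scale from the time elapsed
 * since low_upgrade_time, a later upgrade resumes from the halved scale
 * instead of restarting from 0. Only when the scale is worn down to 0
 * does the whole queue actually drop back to LIMIT_LOW.
 */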
static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;
	unsigned long now = jiffies;

	/*
	 * If the cgroup is below its low limit, consider downgrading so
	 * that other cgroups get throttled again.
	 */
	if (time_after_eq(now, tg_last_low_overflow_time(tg) +
					td->throtl_slice) &&
	    (!throtl_tg_is_idle(tg) ||
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
		return true;
	return false;
}

static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
{
	struct throtl_data *td = tg->td;

	if (time_before(jiffies, td->low_upgrade_time + td->throtl_slice))
		return false;

	while (true) {
		if (!throtl_tg_can_downgrade(tg))
			return false;
		tg = sq_to_tg(tg->service_queue.parent_sq);
		if (!tg || !tg_to_blkg(tg)->parent)
			break;
	}
	return true;
}

static void throtl_downgrade_check(struct throtl_grp *tg)
{
	uint64_t bps;
	unsigned int iops;
	unsigned long elapsed_time;
	unsigned long now = jiffies;

	if (tg->td->limit_index != LIMIT_MAX ||
	    !tg->td->limit_valid[LIMIT_LOW])
		return;
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
		return;
	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
		return;

	elapsed_time = now - tg->last_check_time;
	tg->last_check_time = now;

	if (time_before(now, tg_last_low_overflow_time(tg) +
			tg->td->throtl_slice))
		return;

	if (tg->bps[READ][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[READ] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->bps[WRITE][LIMIT_LOW]) {
		bps = tg->last_bytes_disp[WRITE] * HZ;
		do_div(bps, elapsed_time);
		if (bps >= tg->bps[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	if (tg->iops[READ][LIMIT_LOW]) {
		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
		if (iops >= tg->iops[READ][LIMIT_LOW])
			tg->last_low_overflow_time[READ] = now;
	}

	if (tg->iops[WRITE][LIMIT_LOW]) {
		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
		if (iops >= tg->iops[WRITE][LIMIT_LOW])
			tg->last_low_overflow_time[WRITE] = now;
	}

	/*
	 * If the cgroup is below its low limit, consider downgrading so
	 * that other cgroups get throttled again.
	 */
	if (throtl_hierarchy_can_downgrade(tg))
		throtl_downgrade_state(tg->td);

	tg->last_bytes_disp[READ] = 0;
	tg->last_bytes_disp[WRITE] = 0;
	tg->last_io_disp[READ] = 0;
	tg->last_io_disp[WRITE] = 0;
}

static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
	unsigned long now;
	unsigned long last_finish_time = tg->last_finish_time;

	if (last_finish_time == 0)
		return;

	now = ktime_get_ns() >> 10;
	if (now <= last_finish_time ||
	    last_finish_time == tg->checked_last_finish_time)
		return;

	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
	tg->checked_last_finish_time = last_finish_time;
}

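/*
 * Two pieces of arithmetic above, with hypothetical numbers for
 * illustration. The downgrade check converts per-window counters to
 * rates: last_bytes_disp * HZ / elapsed_time is bytes/s because
 * elapsed_time is in jiffies, e.g. 10 MiB dispatched over 100 jiffies
 * at HZ=1000 is 100 MiB/s. The idle tracker keeps an exponentially
 * weighted moving average with weight 7/8: with avg_idletime = 800 and
 * a new sample of 1600, the new average is (800 * 7 + 1600) / 8 = 900
 * (all values are ktime_get_ns() >> 10, i.e. roughly microseconds).
 */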
static void throtl_update_latency_buckets(struct throtl_data *td)
{
	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
	int i, cpu, rw;
	unsigned long last_latency[2] = { 0 };
	unsigned long latency[2];

	if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
		return;
	if (time_before(jiffies, td->last_calculate_time + HZ))
		return;
	td->last_calculate_time = jiffies;

	memset(avg_latency, 0, sizeof(avg_latency));
	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];

			for_each_possible_cpu(cpu) {
				struct latency_bucket *bucket;

				/* this isn't race free, but ok in practice */
				bucket = per_cpu_ptr(td->latency_buckets[rw],
					cpu);
				tmp->total_latency += bucket[i].total_latency;
				tmp->samples += bucket[i].samples;
				bucket[i].total_latency = 0;
				bucket[i].samples = 0;
			}

			if (tmp->samples >= 32) {
				int samples = tmp->samples;

				latency[rw] = tmp->total_latency;

				tmp->total_latency = 0;
				tmp->samples = 0;
				latency[rw] /= samples;
				if (latency[rw] == 0)
					continue;
				avg_latency[rw][i].latency = latency[rw];
			}
		}
	}

	for (rw = READ; rw <= WRITE; rw++) {
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			if (!avg_latency[rw][i].latency) {
				if (td->avg_buckets[rw][i].latency < last_latency[rw])
					td->avg_buckets[rw][i].latency =
						last_latency[rw];
				continue;
			}

			if (!td->avg_buckets[rw][i].valid)
				latency[rw] = avg_latency[rw][i].latency;
			else
				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
					avg_latency[rw][i].latency) >> 3;

			td->avg_buckets[rw][i].latency = max(latency[rw],
				last_latency[rw]);
			td->avg_buckets[rw][i].valid = true;
			last_latency[rw] = td->avg_buckets[rw][i].latency;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		throtl_log(&td->service_queue,
			"Latency bucket %d: read latency=%ld, read valid=%d, "
			"write latency=%ld, write valid=%d", i,
			td->avg_buckets[READ][i].latency,
			td->avg_buckets[READ][i].valid,
			td->avg_buckets[WRITE][i].latency,
			td->avg_buckets[WRITE][i].valid);
}
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
{
}

static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
}

static void throtl_downgrade_check(struct throtl_grp *tg)
{
}

static void throtl_upgrade_check(struct throtl_grp *tg)
{
}

static bool throtl_can_upgrade(struct throtl_data *td,
			       struct throtl_grp *this_tg)
{
	return false;
}

static void throtl_upgrade_state(struct throtl_data *td)
{
}
#endif

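/*
 * Presumed contract of __blk_throtl_bio(), inferred from the code below:
 * a true return means @bio was queued for delayed dispatch and the caller
 * must not submit it now; the dispatch work will resubmit it once the
 * group's slice allows. A false return means the bio was charged against
 * the limits and may be issued immediately.
 */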
bool __blk_throtl_bio(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;
	struct throtl_data *td = tg->td;

	rcu_read_lock();

	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
		blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
	}

	spin_lock_irq(&q->queue_lock);

	throtl_update_latency_buckets(td);

	blk_throtl_update_idletime(tg);

	sq = &tg->service_queue;

again:
	while (true) {
		if (tg->last_low_overflow_time[rw] == 0)
			tg->last_low_overflow_time[rw] = jiffies;
		throtl_downgrade_check(tg);
		throtl_upgrade_check(tg);
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL)) {
			tg->last_low_overflow_time[rw] = jiffies;
			if (throtl_can_upgrade(td, tg)) {
				throtl_upgrade_state(td);
				goto again;
			}
			break;
		}

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not
		 * queued for a long time and the slice keeps on extending
		 * while trim is never called. If limits are then reduced
		 * suddenly, all the IO dispatched so far is accounted at
		 * the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if no bio is queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder. If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg) {
			bio_set_flag(bio, BIO_BPS_THROTTLED);
			goto out_unlock;
		}
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
		   tg_bps_limit(tg, rw),
		   tg->io_disp[rw], tg_iops_limit(tg, rw),
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	tg->last_low_overflow_time[rw] = jiffies;

	td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio. The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	if (throttled || !td->track_bio_latency)
		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
#endif
	spin_unlock_irq(&q->queue_lock);

	rcu_read_unlock();
	return throttled;
}

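/*
 * The latency tracking below sorts samples into LATENCY_BUCKET_SIZE
 * buckets by request size via request_bucket_index(). Worked example:
 * a 64 KiB request is 128 sectors, order_base_2(128) = 7, so it lands
 * in bucket 7 - 3 = 4; anything <= 4 KiB clamps to bucket 0 and
 * anything >= 1 MiB clamps to bucket 8.
 */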
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_track_latency(struct throtl_data *td, sector_t size,
				 enum req_op op, unsigned long time)
{
	const bool rw = op_is_write(op);
	struct latency_bucket *latency;
	int index;

	if (!td || td->limit_index != LIMIT_LOW ||
	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
	    !blk_queue_nonrot(td->queue))
		return;

	index = request_bucket_index(size);

	latency = get_cpu_ptr(td->latency_buckets[rw]);
	latency[index].total_latency += time;
	latency[index].samples++;
	put_cpu_ptr(td->latency_buckets[rw]);
}

void blk_throtl_stat_add(struct request *rq, u64 time_ns)
{
	struct request_queue *q = rq->q;
	struct throtl_data *td = q->td;

	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
			     time_ns >> 10);
}

void blk_throtl_bio_endio(struct bio *bio)
{
	struct blkcg_gq *blkg;
	struct throtl_grp *tg;
	u64 finish_time_ns;
	unsigned long finish_time;
	unsigned long start_time;
	unsigned long lat;
	int rw = bio_data_dir(bio);

	blkg = bio->bi_blkg;
	if (!blkg)
		return;
	tg = blkg_to_tg(blkg);
	if (!tg->td->limit_valid[LIMIT_LOW])
		return;

	finish_time_ns = ktime_get_ns();
	tg->last_finish_time = finish_time_ns >> 10;

	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	finish_time = __bio_issue_time(finish_time_ns) >> 10;
	if (!start_time || finish_time <= start_time)
		return;

	lat = finish_time - start_time;
	/* this is only for bio-based drivers */
	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
				     bio_op(bio), lat);

	if (tg->latency_target && lat >= tg->td->filtered_latency) {
		int bucket;
		unsigned int threshold;

		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
		threshold = tg->td->avg_buckets[rw][bucket].latency +
			tg->latency_target;
		if (lat > threshold)
			tg->bad_bio_cnt++;
		/*
		 * This isn't race free and the count can come out wrong,
		 * which just means the cgroup may get throttled when it
		 * shouldn't be.
		 */
		tg->bio_cnt++;
	}

	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
		tg->bio_cnt /= 2;
		tg->bad_bio_cnt /= 2;
	}
}
#endif

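/*
 * Setup below follows the usual allocate-then-activate pattern: the
 * throtl_data and its two per-cpu bucket arrays are allocated first, and
 * every failure path unwinds exactly what has been allocated so far, in
 * reverse order, so a failed init leaves no partial state behind.
 */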
int blk_throtl_init(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;
	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[READ]) {
		kfree(td);
		return -ENOMEM;
	}
	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[WRITE]) {
		free_percpu(td->latency_buckets[READ]);
		kfree(td);
		return -ENOMEM;
	}

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	td->limit_valid[LIMIT_MAX] = true;
	td->limit_index = LIMIT_MAX;
	td->low_upgrade_time = jiffies;
	td->low_downgrade_time = jiffies;

	/* activate policy */
	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets[READ]);
		free_percpu(td->latency_buckets[WRITE]);
		kfree(td);
	}
	return ret;
}

void blk_throtl_exit(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	BUG_ON(!q->td);
	del_timer_sync(&q->td->service_queue.pending_timer);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets[READ]);
	free_percpu(q->td->latency_buckets[WRITE]);
	kfree(q->td);
}

void blk_throtl_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct throtl_data *td;
	int i;

	td = q->td;
	BUG_ON(!td);

	if (blk_queue_nonrot(q)) {
		td->throtl_slice = DFL_THROTL_SLICE_SSD;
		td->filtered_latency = LATENCY_FILTERED_SSD;
	} else {
		td->throtl_slice = DFL_THROTL_SLICE_HD;
		td->filtered_latency = LATENCY_FILTERED_HD;
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
		}
	}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
	/* if no low limit, use previous default */
	td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif

	td->track_bio_latency = !queue_is_mq(q);
	if (!td->track_bio_latency)
		blk_stat_enable_accounting(q);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
{
	if (!q->td)
		return -EINVAL;
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
}

ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count)
{
	unsigned long v;
	unsigned long t;

	if (!q->td)
		return -EINVAL;
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
	if (t == 0 || t > MAX_THROTL_SLICE)
		return -EINVAL;
	q->td->throtl_slice = t;
	return count;
}
#endif

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);