/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will be unthrottled and ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	int limits_changed;
};

struct throtl_data {
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* Total number of undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	int limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
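/*
 * THROTL_TG_FNS(on_rr) expands to throtl_mark_tg_on_rr(),
 * throtl_clear_tg_on_rr() and throtl_tg_on_rr(), which set, clear and
 * test the THROTL_TG_FLAG_on_rr bit in tg->flags, i.e. whether the
 * group is currently queued on the service tree.
 */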
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;
	kfree(tg);
}

static struct throtl_grp *throtl_find_alloc_tg(struct throtl_data *td,
			struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct throtl_grp *tg = NULL;
	void *key = td;
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major = 0, minor = 0;

	/*
	 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix
	 * tree of blkgs (instead of traversing the hash list every time).
	 */

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = &td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	/* Fill in device details for root group */
	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
		goto done;
	}

	if (tg)
		goto done;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		goto done;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);

	/* Add group onto cgroup list */
	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				MKDEV(major, minor), BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
done:
	return tg;
}

static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct cgroup *cgroup;
	struct throtl_grp *tg = NULL;

	rcu_read_lock();
	cgroup = task_cgroup(current, blkio_subsys_id);
	tg = throtl_find_alloc_tg(td, cgroup);
	if (!tg)
		tg = &td->root_tg;
	rcu_read_unlock();
	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}
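/*
 * The service tree is an rbtree of active groups keyed by tg->disptime,
 * with the leftmost (earliest) node cached in st->left. The next dispatch
 * is therefore always driven by st->min_disptime, the dispatch time of
 * the leftmost group; the function below converts that into a
 * delayed-work delay.
 */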
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps is unlimited (-1), the time slice doesn't get renewed.
	 * Don't try to trim the slice if it has already expired. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A bogus high
	 * slice_end is bad because it prevents a new slice from starting.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
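	/*
	 * Worked example of the trim arithmetic below (illustrative,
	 * assuming HZ=1000, so throtl_slice = 100 jiffies): with
	 * bps = 1048576 (1MB/s) and one elapsed slice, bytes_trim =
	 * 1048576 * 100 * 1 / 1000 = 104857, i.e. one slice's worth of
	 * bytes is forgiven. Similarly with iops = 100, io_trim =
	 * 100 * 100 * 1 / 1000 = 10 bios per elapsed slice.
	 */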
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a large value: the minimum iops
	 * is 1, so at most the elapsed jiffies are equivalent to one
	 * second, since we allow dispatch after one second and by then
	 * the slice should have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
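/*
 * Byte-based analogue of the iops check above. Worked example of the
 * wait computation (illustrative, assuming HZ=1000): with bps = 1048576
 * (1MB/s) and a bio that overshoots the current allowance by
 * extra_bytes = 524288 (512KB), jiffy_wait = 524288 * 1000 / 1048576 =
 * 500 jiffies, i.e. the dispatch is pushed out by about half a second.
 */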
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not call
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one; otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long from now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = bio->bi_rw & REQ_SYNC;

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * TODO: This will take blkg->stats_lock. Figure out a way
	 * to avoid this cost.
	 */
	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(!td->nr_queued[rw]);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */
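	/*
	 * With throtl_grp_quantum = 8 this works out to max_nr_reads = 6
	 * and max_nr_writes = 2, so reads get roughly a 3:1 preference
	 * over writes in each dispatch round from a group.
	 */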
	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
	       && tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
	       && tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITE. It might
		 * happen that a group's limits were dropped suddenly and
		 * we don't want to account recently dispatched IO at the
		 * new low rate.
		 */
		throtl_start_new_slice(td, tg, READ);
		throtl_start_new_slice(td, tg, WRITE);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}
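/*
 * Dispatch strategy: eligible bios are collected onto a bio list on the
 * stack while the queue lock is held, and are submitted via
 * generic_make_request() only after the lock is dropped, under a blk
 * plug so the batch is issued to the driver in one go.
 */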
/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some bios, submit them outside the queue lock,
	 * under a plug, so they are issued immediately as a batch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* Schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) > 0 || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If the cgroup removal path got to the blkio group first
		 * and removed it from the cgroup list, then it will take
		 * care of destroying the throtl group as well.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}
/*
 * Blk cgroup controller notification saying that the blkio_group object
 * is being delinked as the associated cgroup object is going away. That
 * also means that no new IO will come in this group. So get rid of this
 * group as soon as any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). "key" is the rcu
 * protected pointer. That means "key" is a valid throtl_data pointer as
 * long as we hold the rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL: even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}

/*
 * For all update functions, key should be a valid pointer because these
 * update functions are called under blkcg_lock; that means blkg is valid
 * and in turn key is valid. The queue exit path can not race because of
 * blkcg_lock.
 *
 * Can not take the queue lock in update functions, as taking the queue
 * lock under blkcg_lock is not allowed. On other paths we take blkcg_lock
 * under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_read_iops(void *key,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_iops(void *key,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};
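/*
 * Main throttling entry point. Returns 0 in all cases; the caller checks
 * *biop: if the bio was throttled and queued here, *biop is set to NULL
 * so the caller skips submission, and the bio is submitted later by the
 * dispatch worker. Otherwise the bio was charged against the group's
 * budget and may be dispatched directly.
 */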
int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same
		 * direction. No need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not
		 * queued for a long time and the slice keeps on extending
		 * while trim is never called. If limits are then reduced
		 * suddenly, we would account all the IO dispatched so far
		 * at the new low rate and newly queued IO would get a
		 * really long dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not
		 * queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;

	/* Init root group */
	tg = &td->root_tg;
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;
	tg->limits_changed = false;

	/*
	 * Set the root group reference to 2. One reference will be dropped
	 * when all groups on tg_list are being deleted during queue exit.
	 * The other reference will remain, as we don't want to delete this
	 * group: it is statically allocated and gets destroyed when
	 * throtl_data goes away.
	 */
	atomic_set(&tg->ref, 2);
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;

	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	rcu_read_lock();
	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
					0, BLKIO_POLICY_THROTL);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	td->queue = q;
	q->td = td;
	return 0;
}
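/*
 * Queue exit teardown, in order: cancel pending work, drop all groups,
 * wait (via synchronize_rcu) for tg->blkg->key readers if some groups
 * were handed off to the cgroup deletion path, then cancel any work that
 * a late limit update may have requeued, and free throtl_data.
 */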
void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than the root group). This can happen if the cgroup
	 * deletion path claimed the responsibility of cleaning up a group
	 * before the queue cleanup code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are
	 * drivers which create/delete request queues hundreds of times
	 * during scan/boot, and synchronize_rcu() can take significant
	 * time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe: if somebody updated limits through cgroup after
	 * the previous flush and another work got queued, cancel it.
	 */
	throtl_shutdown_wq(q);
	throtl_td_free(td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);