// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include "blk.h"
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;

static DEFINE_RAW_SPINLOCK(blkg_stat_lock);

#define BLKG_DESTROY_BATCH_SIZE	64

/*
 * Lockless lists for tracking IO stats update
 *
 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
 * There are multiple blkg's (one for each block device) attached to each
 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
 * but it doesn't know which blkg has the updated stats. If there are many
 * block devices in a system, the cost of iterating all the blkg's to flush
 * out the IO stats can be high. To reduce such overhead, a set of percpu
 * lockless lists (lhead) per blkcg are used to track the set of recently
 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
 * References to blkg are gotten and then put back in the process to
 * protect against blkg removal.
 *
 * Return: 0 if successful or -ENOMEM if allocation fails.
 */
static int init_blkcg_llists(struct blkcg *blkcg)
{
	int cpu;

	blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
	if (!blkcg->lhead)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
	return 0;
}

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static void blkg_free_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     free_work);
	struct request_queue *q = blkg->q;
	int i;

	/*
	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
	 * in order to make sure pd_free_fn() is called in order, the deletion
	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
	 * blkcg_deactivate_policy().
	 */
	mutex_lock(&q->blkcg_mutex);
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	if (blkg->parent)
		blkg_put(blkg->parent);
	spin_lock_irq(&q->queue_lock);
	list_del_init(&blkg->q_node);
	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	blk_put_queue(q);
	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	if (!blkg)
		return;

	/*
	 * Both ->pd_free_fn() and request queue's release handler may
	 * sleep, so free us by scheduling one work func
	 */
	INIT_WORK(&blkg->free_work, blkg_free_workfn);
	schedule_work(&blkg->free_work);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
	struct blkcg *blkcg = blkg->blkcg;
	int cpu;

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif
	/*
	 * Flush all the non-empty percpu lockless lists before releasing
	 * us, given these stats belong to us.
	 *
	 * blkg_stat_lock is for serializing blkg stat update
	 */
	for_each_possible_cpu(cpu)
		__blkcg_rstat_flush(blkcg, cpu);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
static struct workqueue_struct *blkcg_punt_bio_wq;

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock(&blkg->async_bio_lock);
	bio_list_merge_init(&bios, &blkg->async_bios);
	spin_unlock(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/*
 * When a shared kthread issues a bio for a cgroup, doing so synchronously can
 * lead to priority inversions as the kthread can be trapped waiting for that
 * cgroup.  Use this helper instead of submit_bio to punt the actual issuing to
 * a dedicated per-blkcg work item to avoid such priority inversions.
 */
void blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	if (blkg->parent) {
		spin_lock(&blkg->async_bio_lock);
		bio_list_add(&blkg->async_bios, bio);
		spin_unlock(&blkg->async_bio_lock);
		queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	} else {
		/* never bounce for the root cgroup */
		submit_bio(bio);
	}
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);

static int __init blkcg_punt_bio_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_punt_bio_init);
#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */

/**
 * bio_blkcg_css - return the blkcg CSS associated with a bio
 * @bio: target bio
 *
 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
 * associated. Callers are expected to either handle %NULL or know association
 * has been done prior to calling this.
 */
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
	if (!bio || !bio->bi_blkg)
		return NULL;
	return &bio->bi_blkg->blkcg->css;
}
EXPORT_SYMBOL_GPL(bio_blkcg_css);

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @disk: gendisk the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @disk.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
	if (!blkg)
		return NULL;
	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto out_free_blkg;
	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto out_exit_refcnt;
	if (!blk_get_queue(disk->queue))
		goto out_free_iostat;

	blkg->q = disk->queue;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->iostat.blkg = blkg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
#endif

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu) {
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
		per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(disk->queue, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
		if (!pd)
			goto out_free_pds;
		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
		pd->online = false;
	}

	return blkg;

out_free_pds:
	while (--i >= 0)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	blk_put_queue(disk->queue);
out_free_iostat:
	free_percpu(blkg->iostat_cpu);
out_exit_refcnt:
	percpu_ref_exit(&blkg->refcnt);
out_free_blkg:
	kfree(blkg);
	return NULL;
}

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	lockdep_assert_held(&disk->queue->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(disk->queue)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &disk->queue->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i]) {
				if (pol->pd_online_fn)
					pol->pd_online_fn(blkg->pd[i]);
				blkg->pd[i]->online = true;
			}
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	if (new_blkg)
		blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @disk: gendisk of interest
 *
 * Lookup blkg for the @blkcg - @disk pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and takes @disk->queue->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
					   struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = blkg_lookup(blkcg, q);
	if (blkg) {
		if (blkcg != &blkcg_root &&
		    blkg != rcu_dereference(blkcg->blkg_hint))
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto found;
	}

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents. Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = blkg_lookup(parent, q);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, disk, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/*
	 * blkg stays on the queue list until blkg_free_workfn(), see details in
	 * blkg_free_workfn(), hence this function can be called from
	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
	 * blkg_free_workfn().
	 */
	if (hlist_unhashed(&blkg->blkcg_node))
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && blkg->pd[i]->online) {
			blkg->pd[i]->online = false;
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[i]);
		}
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

static void blkg_destroy_all(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	int count = BLKG_DESTROY_BATCH_SIZE;
	int i;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		if (hlist_unhashed(&blkg->blkcg_node))
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * in order to avoid holding the spin lock for too long, release
		 * it when a batch of blkgs are destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	/*
	 * Mark policy deactivated since policy offline has been done, and
	 * the free is scheduled, so future blkcg_deactivate_policy() can
	 * be bypassed
	 */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (pol)
			__clear_bit(pol->plid, q->blkcg_pols);
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void __blkg_clear_stat(struct blkg_iostat_set *bis)
{
	struct blkg_iostat cur = {0};
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&bis->sync);
	blkg_iostat_set(&bis->cur, &cur);
	blkg_iostat_set(&bis->last, &cur);
	u64_stats_update_end_irqrestore(&bis->sync, flags);
}

static void blkg_clear_stat(struct blkcg_gq *blkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);

		__blkg_clear_stat(s);
	}
	__blkg_clear_stat(&blkg->iostat);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	pr_info_once("blkio.%s is deprecated\n", cftype->name);
	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_clear_stat(blkg);
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	if (!blkg->q->disk)
		return NULL;
	return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
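
/*
 * Usage sketch (illustrative only, not part of the API documentation): a
 * policy that wants to print one value per device from its cftype ->seq_show
 * method would typically pair blkcg_print_blkgs() with a prfill callback
 * built on __blkg_prfill_u64().  The "foo" names below are placeholders, not
 * an existing policy:
 *
 *	static u64 foo_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
 *			      int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd, foo_pd_to_stat(pd, off));
 *	}
 *
 *	static int foo_print(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), foo_prfill,
 *				  &blkcg_policy_foo, 0, true);
 *		return 0;
 *	}
 */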

/**
 * blkg_conf_init - initialize a blkg_conf_ctx
 * @ctx: blkg_conf_ctx to initialize
 * @input: input string
 *
 * Initialize @ctx which can be used to parse blkg config input string @input.
 * Once initialized, @ctx can be used with blkg_conf_open_bdev() and
 * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
 */
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
	*ctx = (struct blkg_conf_ctx){ .input = input };
}
EXPORT_SYMBOL_GPL(blkg_conf_init);

/**
 * blkg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
 * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
 * set to point past the device node prefix.
 *
 * This function may be called multiple times on @ctx and the extra calls become
 * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
 * explicitly if bdev access is needed without resolving the blkcg / policy part
 * of @ctx->input. Returns -errno on error.
 */
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
{
	char *input = ctx->input;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	if (ctx->bdev)
		return 0;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	input += key_len;
	if (!isspace(*input))
		return -EINVAL;
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor), false);
	if (!bdev)
		return -ENODEV;
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return -ENODEV;
	}

	mutex_lock(&bdev->bd_queue->rq_qos_mutex);
	if (!disk_live(bdev->bd_disk)) {
		blkdev_put_no_open(bdev);
		mutex_unlock(&bdev->bd_queue->rq_qos_mutex);
		return -ENODEV;
	}

	ctx->body = input;
	ctx->bdev = bdev;
	return 0;
}
/*
 * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
 * acquires q->elevator_lock, and ensures the correct locking order
 * between q->elevator_lock and q->rq_qos_mutex.
 *
 * This function returns negative error on failure. On success it returns
 * memflags which must be saved and later passed to blkg_conf_exit_frozen
 * for restoring the memalloc scope.
 */
unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
{
	int ret;
	unsigned long memflags;

	if (ctx->bdev)
		return -EINVAL;

	ret = blkg_conf_open_bdev(ctx);
	if (ret < 0)
		return ret;
	/*
	 * At this point, we haven't started protecting anything related to QoS,
	 * so we release q->rq_qos_mutex here, which was first acquired in
	 * blkg_conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after
	 * freezing the queue and acquiring q->elevator_lock to maintain the
	 * correct locking order.
	 */
	mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);

	memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
	mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
	mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);

	return memflags;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse per-blkg config update from @ctx->input and initialize @ctx
 * accordingly. On success, @ctx->body points to the part of @ctx->input
 * following MAJ:MIN, @ctx->bdev points to the target block device and
 * @ctx->blkg to the blkg being configured.
 *
 * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
 * function returns with queue lock held and must be followed by
 * blkg_conf_exit().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx)
	__acquires(&bdev->bd_queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	ret = blkg_conf_open_bdev(ctx);
	if (ret)
		return ret;

	disk = ctx->bdev->bd_disk;
	q = disk->queue;

	/*
	 * blkcg_deactivate_policy() requires queue to be frozen, we can grab
	 * q_usage_counter to prevent racing with blkcg_deactivate_policy().
	 */
	ret = blk_queue_enter(q, 0);
	if (ret)
		goto fail;

	spin_lock_irq(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol)) {
		ret = -EOPNOTSUPP;
		goto fail_unlock;
	}

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !blkg_lookup(parent, q)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);

		new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		spin_lock_irq(&q->queue_lock);

		if (!blkcg_policy_enabled(q, pol)) {
			blkg_free(new_blkg);
			ret = -EOPNOTSUPP;
			goto fail_preloaded;
		}

		blkg = blkg_lookup(pos, q);
		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, disk, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	blk_queue_exit(q);
	ctx->blkg = blkg;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
fail_exit_queue:
	blk_queue_exit(q);
fail:
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_exit - clean up per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Clean up after per-blkg config update.  This function must be called on all
 * blkg_conf_ctx's initialized with blkg_conf_init().
 */
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock)
	__releases(&ctx->bdev->bd_queue->rq_qos_mutex)
{
	if (ctx->blkg) {
		spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
		ctx->blkg = NULL;
	}

	if (ctx->bdev) {
		mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
		blkdev_put_no_open(ctx->bdev);
		ctx->body = NULL;
		ctx->bdev = NULL;
	}
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
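
/*
 * Typical calling pattern (illustrative sketch only, not tied to any one
 * policy; the "foo" names are placeholders): a cftype write handler that
 * accepts "MAJ:MIN <config>" input would do something like
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, &ctx);
 *	if (!ret) {
 *		... parse ctx.body and update ctx.blkg's policy data ...
 *	}
 *	blkg_conf_exit(&ctx);
 *	return ret;
 *
 * blkg_conf_exit() must run on every initialized ctx regardless of whether
 * blkg_conf_prep() succeeded.
 */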

/*
 * Similar to blkg_conf_exit, but also unfreezes the queue and releases
 * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
 * is used to open the bdev.
 */
void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
{
	if (ctx->bdev) {
		struct request_queue *q = ctx->bdev->bd_queue;

		blkg_conf_exit(ctx);
		mutex_unlock(&q->elevator_lock);
		blk_mq_unfreeze_queue(q, memflags);
	}
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
				struct blkg_iostat *last)
{
	struct blkg_iostat delta;
	unsigned long flags;

	/* propagate percpu delta to global */
	flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
	blkg_iostat_set(&delta, cur);
	blkg_iostat_sub(&delta, last);
	blkg_iostat_add(&blkg->iostat.cur, &delta);
	blkg_iostat_add(last, &delta);
	u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}

static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
{
	struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
	struct llist_node *lnode;
	struct blkg_iostat_set *bisc, *next_bisc;
	unsigned long flags;

	rcu_read_lock();

	lnode = llist_del_all(lhead);
	if (!lnode)
		goto out;

	/*
	 * For covering concurrent parent blkg update from blkg_release().
	 *
	 * When flushing from cgroup, the subsystem rstat lock is always held,
	 * so this lock won't cause contention most of time.
	 */
	raw_spin_lock_irqsave(&blkg_stat_lock, flags);

	/*
	 * Iterate only the iostat_cpu's queued in the lockless list.
	 */
	llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
		struct blkcg_gq *blkg = bisc->blkg;
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat cur;
		unsigned int seq;

		/*
		 * Order assignment of `next_bisc` from `bisc->lnode.next` in
		 * llist_for_each_entry_safe and clearing `bisc->lqueued` to
		 * avoid assigning `next_bisc` a new next pointer added in
		 * blk_cgroup_bio_start() in case of re-ordering.
		 *
		 * The pair barrier is implied in llist_add() in
		 * blk_cgroup_bio_start().
		 */
		smp_mb();

		WRITE_ONCE(bisc->lqueued, false);
		if (bisc == &blkg->iostat)
			goto propagate_up; /* propagate up to parent only */

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		blkcg_iostat_update(blkg, &cur, &bisc->last);

propagate_up:
		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent) {
			blkcg_iostat_update(parent, &blkg->iostat.cur,
					    &blkg->iostat.last);
			/*
			 * Queue parent->iostat to its blkcg's lockless
			 * list to propagate up to the grandparent if the
			 * iostat hasn't been queued yet.
			 */
			if (!parent->iostat.lqueued) {
				struct llist_head *plhead;

				plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
				llist_add(&parent->iostat.lnode, plhead);
				parent->iostat.lqueued = true;
			}
		}
	}
	raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
	rcu_read_unlock();
}

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	/* Root-level stats are sourced from system-wide IO stats */
	if (cgroup_parent(css->cgroup))
		__blkcg_rstat_flush(css_to_blkcg(css), cpu);
}

/*
 * We source root cgroup stats from the system-wide stats to avoid
 * tracking the same information twice and incurring overhead when no
 * cgroups are defined. For that reason, css_rstat_flush in
 * blkcg_print_stat does not actually fill out the iostat in the root
 * cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible. For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct block_device *bdev = dev_to_bdev(dev);
		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
		struct blkg_iostat tmp;
		int cpu;
		unsigned long flags;

		memset(&tmp, 0, sizeof(tmp));
		for_each_possible_cpu(cpu) {
			struct disk_stats *cpu_dkstats;

			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
			tmp.ios[BLKG_IOSTAT_READ] +=
				cpu_dkstats->ios[STAT_READ];
			tmp.ios[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->ios[STAT_WRITE];
			tmp.ios[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->ios[STAT_DISCARD];
			// convert sectors to bytes
			tmp.bytes[BLKG_IOSTAT_READ] +=
				cpu_dkstats->sectors[STAT_READ] << 9;
			tmp.bytes[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->sectors[STAT_WRITE] << 9;
			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->sectors[STAT_DISCARD] << 9;
		}

		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
		blkg_iostat_set(&blkg->iostat.cur, &tmp);
		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
	}
	class_dev_iter_exit(&iter);
}

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
	struct blkg_iostat_set *bis = &blkg->iostat;
	u64 rbytes, wbytes, rios, wios, dbytes, dios;
	const char *dname;
	unsigned seq;
	int i;

	if (!blkg->online)
		return;

	dname = blkg_dev_name(blkg);
	if (!dname)
		return;

	seq_printf(s, "%s ", dname);

	do {
		seq = u64_stats_fetch_begin(&bis->sync);

		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
		rios = bis->cur.ios[BLKG_IOSTAT_READ];
		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	if (rbytes || wbytes || rios || wios) {
		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
			rbytes, wbytes, rios, wios,
			dbytes, dios);
	}

	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
		seq_printf(s, " use_delay=%d delay_nsec=%llu",
			atomic_read(&blkg->use_delay),
			(unsigned long long)atomic64_read(&blkg->delay_nsec));
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkg->pd[i] || !pol->pd_stat_fn)
			continue;

		pol->pd_stat_fn(blkg->pd[i], s);
	}

	seq_puts(s, "\n");
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		css_rstat_flush(&blkcg->css);

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		blkcg_print_one_stat(blkg, sf);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
{
	return &css_to_blkcg(css)->cgwb_list;
}
#endif

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg.  This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies.  The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs.  Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	might_sleep();

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (need_resched() || !spin_trylock(&q->queue_lock)) {
			/*
			 * Given that the system can accumulate a huge number
			 * of blkgs in pathological cases, check to see if we
			 * need to reschedule to avoid softlockup.
			 */
			spin_unlock_irq(&blkcg->lock);
			cond_resched();
			spin_lock_irq(&blkcg->lock);
			continue;
		}

		blkg_destroy(blkg);
		spin_unlock(&q->queue_lock);
	}

	spin_unlock_irq(&blkcg->lock);
}

/**
 * blkcg_pin_online - pin online state
 * @blkcg_css: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
{
	refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg_css: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
{
	struct blkcg *blkcg = css_to_blkcg(blkcg_css);

	do {
		struct blkcg *parent;

		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;

		parent = blkcg_parent(blkcg);
		blkcg_destroy_blkgs(blkcg);
		blkcg = parent;
	} while (blkcg);
}

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(css);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(css);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	free_percpu(blkcg->lhead);
	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg)
			goto unlock;
	}

	if (init_blkcg_llists(blkcg))
		goto free_blkcg;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd)
			goto free_pd_blkcg;

		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
	free_percpu(blkcg->lhead);
free_blkcg:
	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ERR_PTR(-ENOMEM);
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
	struct blkcg *parent = blkcg_parent(css_to_blkcg(css));

	/*
	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
	 * don't go offline while cgwbs are still active on them.  Pin the
	 * parent so that offline always happens towards the root.
	 */
	if (parent)
		blkcg_pin_online(&parent->css);
	return 0;
}

void blkg_init_queue(struct request_queue *q)
{
	INIT_LIST_HEAD(&q->blkg_list);
	mutex_init(&q->blkcg_mutex);
}

int blkcg_init_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;

	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	/* spin_lock_irq can serve as RCU read-side critical section. */
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, disk, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);

	if (preloaded)
		radix_tree_preload_end();

	return 0;

err_unlock:
	spin_unlock_irq(&q->queue_lock);
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

void blkcg_exit_disk(struct gendisk *disk)
{
	blkg_destroy_all(disk);
	blk_throtl_exit(disk);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_disk)
		put_disk(tsk->throttle_disk);
	tsk->throttle_disk = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_online = blkcg_css_online,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @disk.  Requires %GFP_KERNEL context.  @disk goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @disk bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	unsigned int memflags;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/*
	 * Policy is allowed to be registered without pd_alloc_fn/pd_free_fn,
	 * for example, ioprio. Such a policy works on the blkcg level, not the
	 * disk level, and doesn't need to be activated.
	 */
	if (WARN_ON_ONCE(!pol->pd_alloc_fn || !pol->pd_free_fn))
		return -EINVAL;

	if (queue_is_mq(q))
		memflags = blk_mq_freeze_queue(q);
retry:
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to initialize parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(disk, blkg->blkcg,
					      GFP_NOWAIT);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed.  Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
						       GFP_KERNEL);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		spin_lock(&blkg->blkcg->lock);

		pd->blkg = blkg;
		pd->plid = pol->plid;
		blkg->pd[pol->plid] = pd;

		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);

		if (pol->pd_online_fn)
			pol->pd_online_fn(pd);
		pd->online = true;

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q, memflags);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, take down everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;
		struct blkg_policy_data *pd;

		spin_lock(&blkcg->lock);
		pd = blkg->pd[pol->plid];
		if (pd) {
			if (pd->online && pol->pd_offline_fn)
				pol->pd_offline_fn(pd);
			pd->online = false;
			pol->pd_free_fn(pd);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @disk.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned int memflags;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		memflags = blk_mq_freeze_queue(q);

	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q, memflags);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

static void blkcg_free_all_cpd(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
		if (blkcg->cpd[pol->plid]) {
			pol->cpd_free_fn(blkcg->cpd[pol->plid]);
			blkcg->cpd[pol->plid] = NULL;
		}
	}
}

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	/*
	 * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs, and a
	 * policy without pd_alloc_fn/pd_free_fn can't be activated.
	 */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		return -EINVAL;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd) {
				ret = -ENOMEM;
				goto err_free_cpds;
			}

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes == pol->legacy_cftypes) {
		WARN_ON(cgroup_add_cftypes(&io_cgrp_subsys,
					   pol->dfl_cftypes));
	} else {
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	}
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
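
/*
 * Registration sketch (illustrative only; "foo" is a placeholder, not an
 * in-tree policy): a policy typically fills in a struct blkcg_policy and
 * registers it from its init path, e.g.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 * with a matching blkcg_policy_unregister() on the exit path, and
 * blkcg_activate_policy() invoked per gendisk when the policy needs per-disk
 * policy data.
 */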

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/* negative use_delay means no scaling, see blkcg_set_delay() */
	if (atomic_read(&blkg->use_delay) < 0)
		return;

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}

/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	bool clamp;
	u64 now = blk_time_get_ns();
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		int use_delay = atomic_read(&blkg->use_delay);

		if (use_delay) {
			u64 this_delay;

			blkcg_scale_delay(blkg, now);
			this_delay = atomic64_read(&blkg->delay_nsec);
			if (this_delay > delay_nsec) {
				delay_nsec = this_delay;
				clamp = use_delay > 0;
			}
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap the
	 * delays at 0.25s.  If there's 10's of seconds worth of delay then the
	 * tasks will be delayed for 0.25 second for every syscall.  If
	 * blkcg_set_delay() was used as indicated by negative use_delay, the
	 * caller is responsible for regulating the range.
/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
        unsigned long pflags;
        bool clamp;
        u64 now = blk_time_get_ns();
        u64 exp;
        u64 delay_nsec = 0;
        int tok;

        while (blkg->parent) {
                int use_delay = atomic_read(&blkg->use_delay);

                if (use_delay) {
                        u64 this_delay;

                        blkcg_scale_delay(blkg, now);
                        this_delay = atomic64_read(&blkg->delay_nsec);
                        if (this_delay > delay_nsec) {
                                delay_nsec = this_delay;
                                clamp = use_delay > 0;
                        }
                }
                blkg = blkg->parent;
        }

        if (!delay_nsec)
                return;

        /*
         * Let's not sleep for all eternity if we've amassed a huge delay.
         * Swapping or metadata IO can accumulate 10's of seconds worth of
         * delay, and we want userspace to be able to do _something_, so cap
         * the delays at 0.25s.  If there's 10's of seconds worth of delay then
         * the tasks will be delayed for 0.25 seconds for every syscall.  If
         * blkcg_set_delay() was used as indicated by negative use_delay, the
         * caller is responsible for regulating the range.
         */
        if (clamp)
                delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

        if (use_memdelay)
                psi_memstall_enter(&pflags);

        exp = ktime_add_ns(now, delay_nsec);
        tok = io_schedule_prepare();
        do {
                __set_current_state(TASK_KILLABLE);
                if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
                        break;
        } while (!fatal_signal_pending(current));
        io_schedule_finish(tok);

        if (use_memdelay)
                psi_memstall_leave(&pflags);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_disk is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is set up for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
        struct gendisk *disk = current->throttle_disk;
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool use_memdelay = current->use_memdelay;

        if (!disk)
                return;

        current->throttle_disk = NULL;
        current->use_memdelay = false;

        rcu_read_lock();
        blkcg = css_to_blkcg(blkcg_css());
        if (!blkcg)
                goto out;
        blkg = blkg_lookup(blkcg, disk->queue);
        if (!blkg)
                goto out;
        if (!blkg_tryget(blkg))
                goto out;
        rcu_read_unlock();

        blkcg_maybe_throttle_blkg(blkg, use_memdelay);
        blkg_put(blkg);
        put_disk(disk);
        return;
out:
        rcu_read_unlock();
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @disk: disk to throttle
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are
 * places we call this that may not have that information; the swapping code,
 * for instance, will only have a block_device at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set the next time we see the task.
 */
void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
{
        if (unlikely(current->flags & PF_KTHREAD))
                return;

        if (current->throttle_disk != disk) {
                if (test_bit(GD_DEAD, &disk->state))
                        return;
                get_device(disk_to_dev(disk));

                if (current->throttle_disk)
                        put_disk(current->throttle_disk);
                current->throttle_disk = disk;
        }

        if (use_memdelay)
                current->use_memdelay = use_memdelay;
        set_notify_resume(current);
}
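/*
 * Illustrative sketch only: an IO controller that has decided a group owes
 * some delay typically charges it to the blkg and then asks the offending
 * task to throttle itself on its way back to user space, roughly:
 *
 *      blkcg_add_delay(blkg, blk_time_get_ns(), penalty_ns);
 *      blkcg_schedule_throttle(disk, true);
 *
 * penalty_ns and disk stand for whatever the controller derived for the
 * offending IO (hypothetical names here); passing true additionally accounts
 * the eventual stall as PSI memory pressure via psi_memstall_enter().
 * blkcg_add_delay() is defined just below.
 */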
/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
        if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
                return;
        blkcg_scale_delay(blkg, now);
        atomic64_add(delta, &blkg->delay_nsec);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @bio: target bio
 * @css: target css
 *
 * As the failure mode here is to walk up the blkg tree, this ensures that the
 * blkg->parent pointers are always valid.  This returns the blkg that it ended
 * up taking a reference on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
                struct cgroup_subsys_state *css)
{
        struct blkcg_gq *blkg, *ret_blkg = NULL;

        rcu_read_lock();
        blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
        while (blkg) {
                if (blkg_tryget(blkg)) {
                        ret_blkg = blkg;
                        break;
                }
                blkg = blkg->parent;
        }
        rcu_read_unlock();

        return ret_blkg;
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  An association failure is handled by walking up
 * the blkg tree.  Therefore, the blkg associated can be anything between the
 * blkg for @css and q->root_blkg.  This situation only happens when a cgroup
 * is dying and then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css)
{
        if (bio->bi_blkg)
                blkg_put(bio->bi_blkg);

        if (css && css->parent) {
                bio->bi_blkg = blkg_tryget_closest(bio, css);
        } else {
                blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
                bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
        }
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
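/*
 * Illustrative sketch only: a submitter that issues IO on behalf of another
 * cgroup (writeback being the usual case) can attach the target css to each
 * bio it builds before submission, e.g.:
 *
 *      bio_associate_blkg_from_css(bio, target_css);
 *      submit_bio(bio);
 *
 * target_css is a hypothetical name here; if it (or its blkg) is dying, the
 * bio simply ends up charged to the closest live ancestor as described above.
 */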
/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
        struct cgroup_subsys_state *css;

        if (blk_op_is_passthrough(bio->bi_opf))
                return;

        rcu_read_lock();

        if (bio->bi_blkg)
                css = bio_blkcg_css(bio);
        else
                css = blkcg_css();

        bio_associate_blkg_from_css(bio, css);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
        if (src->bi_blkg)
                bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);

static int blk_cgroup_io_type(struct bio *bio)
{
        if (op_is_discard(bio->bi_opf))
                return BLKG_IOSTAT_DISCARD;
        if (op_is_write(bio->bi_opf))
                return BLKG_IOSTAT_WRITE;
        return BLKG_IOSTAT_READ;
}

void blk_cgroup_bio_start(struct bio *bio)
{
        struct blkcg *blkcg = bio->bi_blkg->blkcg;
        int rwd = blk_cgroup_io_type(bio), cpu;
        struct blkg_iostat_set *bis;
        unsigned long flags;

        if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
                return;

        /* Root-level stats are sourced from system-wide IO stats */
        if (!cgroup_parent(blkcg->css.cgroup))
                return;

        cpu = get_cpu();
        bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
        flags = u64_stats_update_begin_irqsave(&bis->sync);

        /*
         * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
         * bio and we would have already accounted for the size of the bio.
         */
        if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
                bio_set_flag(bio, BIO_CGROUP_ACCT);
                bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
        }
        bis->cur.ios[rwd]++;

        /*
         * If the iostat_cpu isn't in a lockless list, put it into the
         * list to indicate that a stat update is pending.
         */
        if (!READ_ONCE(bis->lqueued)) {
                struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);

                llist_add(&bis->lnode, lhead);
                WRITE_ONCE(bis->lqueued, true);
        }

        u64_stats_update_end_irqrestore(&bis->sync, flags);
        css_rstat_updated(&blkcg->css, cpu);
        put_cpu();
}

bool blk_cgroup_congested(void)
{
        struct blkcg *blkcg;
        bool ret = false;

        rcu_read_lock();
        for (blkcg = css_to_blkcg(blkcg_css()); blkcg;
             blkcg = blkcg_parent(blkcg)) {
                if (atomic_read(&blkcg->congestion_count)) {
                        ret = true;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
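/*
 * Illustrative sketch only: a caller about to generate more IO on a
 * best-effort path can poll blk_cgroup_congested() to back off instead of
 * piling work onto an already-throttled cgroup, e.g.:
 *
 *      if (blk_cgroup_congested()) {
 *              cond_resched();
 *              return;
 *      }
 *
 * The check walks from the current task's blkcg up to the root and reports
 * congestion if any ancestor has a non-zero congestion_count.
 */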