// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include "blk.h"
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals. Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;

static DEFINE_RAW_SPINLOCK(blkg_stat_lock);

#define BLKG_DESTROY_BATCH_SIZE  64

/*
 * Lockless lists for tracking IO stats update
 *
 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
 * There are multiple blkg's (one for each block device) attached to each
 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
 * but it doesn't know which blkg has the updated stats. If there are many
 * block devices in a system, the cost of iterating all the blkg's to flush
 * out the IO stats can be high. To reduce such overhead, a set of percpu
 * lockless lists (lhead) per blkcg are used to track the set of recently
 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
 * References to blkg are gotten and then put back in the process to
 * protect against blkg removal.
 *
 * Return: 0 if successful or -ENOMEM if allocation fails.
 */
static int init_blkcg_llists(struct blkcg *blkcg)
{
        int cpu;

        blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
        if (!blkcg->lhead)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
        return 0;
}
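
/*
 * Aside (illustrative, not an additional API): the update/flush scheme
 * described above is the standard lockless-list producer/consumer idiom.
 * A minimal sketch of both sides, using the names from this file:
 *
 *	// update side: enqueue a blkg_iostat_set at most once per flush
 *	if (!READ_ONCE(bis->lqueued)) {
 *		llist_add(&bis->lnode, this_cpu_ptr(blkcg->lhead));
 *		WRITE_ONCE(bis->lqueued, true);
 *	}
 *
 *	// flush side: detach the whole list atomically, then walk it
 *	lnode = llist_del_all(lhead);
 *	llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
 *		WRITE_ONCE(bisc->lqueued, false);
 *		// ... fold bisc's percpu counters into the global stats
 *	}
 *
 * See blk_cgroup_bio_start() and __blkcg_rstat_flush() below for the real
 * implementations.
 */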

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static struct cgroup_subsys_state *blkcg_css(void)
{
        struct cgroup_subsys_state *css;

        css = kthread_blkcg();
        if (css)
                return css;
        return task_css(current, io_cgrp_id);
}

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

static void blkg_free_workfn(struct work_struct *work)
{
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
                                             free_work);
        struct request_queue *q = blkg->q;
        int i;

        /*
         * pd_free_fn() can also be called from blkcg_deactivate_policy().
         * In order to make sure pd_free_fn() is called in order, the deletion
         * of the list blkg->q_node is delayed to here from blkg_destroy(), and
         * blkcg_mutex is used to synchronize blkg_free_workfn() and
         * blkcg_deactivate_policy().
         */
        mutex_lock(&q->blkcg_mutex);
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
        if (blkg->parent)
                blkg_put(blkg->parent);
        spin_lock_irq(&q->queue_lock);
        list_del_init(&blkg->q_node);
        spin_unlock_irq(&q->queue_lock);
        mutex_unlock(&q->blkcg_mutex);

        blk_put_queue(q);
        free_percpu(blkg->iostat_cpu);
        percpu_ref_exit(&blkg->refcnt);
        kfree(blkg);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        if (!blkg)
                return;

        /*
         * Both ->pd_free_fn() and request queue's release handler may
         * sleep, so free us by scheduling one work func.
         */
        INIT_WORK(&blkg->free_work, blkg_free_workfn);
        schedule_work(&blkg->free_work);
}

static void __blkg_release(struct rcu_head *rcu)
{
        struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
        struct blkcg *blkcg = blkg->blkcg;
        int cpu;

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
        WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif
        /*
         * Flush all the non-empty percpu lockless lists before releasing
         * us, given these stats belong to us.
         *
         * blkg_stat_lock is for serializing blkg stat update.
         */
        for_each_possible_cpu(cpu)
                __blkcg_rstat_flush(blkcg, cpu);

        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
        blkg_free(blkg);
}
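
/*
 * Overview of blkg teardown (descriptive note): once a blkg is destroyed,
 * percpu_ref_kill() in blkg_destroy() eventually invokes blkg_release(),
 * which defers to __blkg_release() via call_rcu(). That in turn punts the
 * final, possibly-sleeping cleanup to blkg_free_workfn() through
 * blkg_free(). The full chain is:
 *
 *	percpu_ref_kill(&blkg->refcnt)
 *	  -> blkg_release()                       (atomic context)
 *	    -> call_rcu(__blkg_release)           (after a grace period)
 *	      -> blkg_free()
 *	        -> schedule_work(blkg_free_workfn)  (may sleep)
 */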

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid. For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
        struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

        call_rcu(&blkg->rcu_head, __blkg_release);
}

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
static struct workqueue_struct *blkcg_punt_bio_wq;

static void blkg_async_bio_workfn(struct work_struct *work)
{
        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
                                             async_bio_work);
        struct bio_list bios = BIO_EMPTY_LIST;
        struct bio *bio;
        struct blk_plug plug;
        bool need_plug = false;

        /* as long as there are pending bios, @blkg can't go away */
        spin_lock(&blkg->async_bio_lock);
        bio_list_merge_init(&bios, &blkg->async_bios);
        spin_unlock(&blkg->async_bio_lock);

        /* start plug only when bio_list contains at least 2 bios */
        if (bios.head && bios.head->bi_next) {
                need_plug = true;
                blk_start_plug(&plug);
        }
        while ((bio = bio_list_pop(&bios)))
                submit_bio(bio);
        if (need_plug)
                blk_finish_plug(&plug);
}

/*
 * When a shared kthread issues a bio for a cgroup, doing so synchronously can
 * lead to priority inversions as the kthread can be trapped waiting for that
 * cgroup. Use this helper instead of submit_bio to punt the actual issuing to
 * a dedicated per-blkcg work item to avoid such priority inversions.
 */
void blkcg_punt_bio_submit(struct bio *bio)
{
        struct blkcg_gq *blkg = bio->bi_blkg;

        if (blkg->parent) {
                spin_lock(&blkg->async_bio_lock);
                bio_list_add(&blkg->async_bios, bio);
                spin_unlock(&blkg->async_bio_lock);
                queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
        } else {
                /* never bounce for the root cgroup */
                submit_bio(bio);
        }
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);

static int __init blkcg_punt_bio_init(void)
{
        blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
                                            WQ_MEM_RECLAIM | WQ_FREEZABLE |
                                            WQ_UNBOUND | WQ_SYSFS, 0);
        if (!blkcg_punt_bio_wq)
                return -ENOMEM;
        return 0;
}
subsys_initcall(blkcg_punt_bio_init);
#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */
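
/*
 * Usage sketch (illustrative, not taken from this file): a filesystem
 * worker thread that issues IO on behalf of many cgroups would first
 * associate the bio with the owning cgroup's css and then punt submission,
 * e.g.:
 *
 *	bio_associate_blkg_from_css(bio, css);
 *	blkcg_punt_bio_submit(bio);	// instead of submit_bio(bio)
 *
 * so that a throttled cgroup delays only its own per-blkcg work item
 * rather than the shared kthread.
 */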

/**
 * bio_blkcg_css - return the blkcg CSS associated with a bio
 * @bio: target bio
 *
 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
 * associated. Callers are expected to either handle %NULL or know association
 * has been done prior to calling this.
 */
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
        if (!bio || !bio->bi_blkg)
                return NULL;
        return &bio->bi_blkg->blkcg->css;
}
EXPORT_SYMBOL_GPL(bio_blkcg_css);

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @disk: gendisk the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @disk.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i, cpu;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
        if (!blkg)
                return NULL;
        if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
                goto out_free_blkg;
        blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
        if (!blkg->iostat_cpu)
                goto out_exit_refcnt;
        if (!blk_get_queue(disk->queue))
                goto out_free_iostat;

        blkg->q = disk->queue;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
        spin_lock_init(&blkg->async_bio_lock);
        bio_list_init(&blkg->async_bios);
        INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
#endif

        u64_stats_init(&blkg->iostat.sync);
        for_each_possible_cpu(cpu) {
                u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
                per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(disk->queue, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
                if (!pd)
                        goto out_free_pds;
                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;
                pd->online = false;
        }

        return blkg;

out_free_pds:
        while (--i >= 0)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
        blk_put_queue(disk->queue);
out_free_iostat:
        free_percpu(blkg->iostat_cpu);
out_exit_refcnt:
        percpu_ref_exit(&blkg->refcnt);
out_free_blkg:
        kfree(blkg);
        return NULL;
}

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
                                    struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        int i, ret;

        lockdep_assert_held(&disk->queue->queue_lock);

        /* request_queue is dying, do not create/recreate a blkg */
        if (blk_queue_dying(disk->queue)) {
                ret = -ENODEV;
                goto err_free_blkg;
        }

        /* blkg holds a reference to blkcg */
        if (!css_tryget_online(&blkcg->css)) {
                ret = -ENODEV;
                goto err_free_blkg;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto err_put_css;
                }
        }
        blkg = new_blkg;

        /* link parent */
        if (blkcg_parent(blkcg)) {
                blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
                if (WARN_ON_ONCE(!blkg->parent)) {
                        ret = -ENODEV;
                        goto err_put_css;
                }
                blkg_get(blkg->parent);
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_init_fn)
                        pol->pd_init_fn(blkg->pd[i]);
        }

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &disk->queue->blkg_list);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i]) {
                                if (pol->pd_online_fn)
                                        pol->pd_online_fn(blkg->pd[i]);
                                blkg->pd[i]->online = true;
                        }
                }
        }
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

        /* @blkg failed to be fully initialized, use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

err_put_css:
        css_put(&blkcg->css);
err_free_blkg:
        if (new_blkg)
                blkg_free(new_blkg);
        return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @disk: gendisk of interest
 *
 * Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
 * create one. blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg. This function
 * should be called under RCU read lock and takes @disk->queue->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blkcg_gq *blkg;
        unsigned long flags;

        WARN_ON_ONCE(!rcu_read_lock_held());

        blkg = blkg_lookup(blkcg, q);
        if (blkg)
                return blkg;

        spin_lock_irqsave(&q->queue_lock, flags);
        blkg = blkg_lookup(blkcg, q);
        if (blkg) {
                if (blkcg != &blkcg_root &&
                    blkg != rcu_dereference(blkcg->blkg_hint))
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                goto found;
        }

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents. Returns the closest
         * blkg to the intended blkg should blkg_create() fail.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);
                struct blkcg_gq *ret_blkg = q->root_blkg;

                while (parent) {
                        blkg = blkg_lookup(parent, q);
                        if (blkg) {
                                /* remember closest blkg */
                                ret_blkg = blkg;
                                break;
                        }
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                blkg = blkg_create(pos, disk, NULL);
                if (IS_ERR(blkg)) {
                        blkg = ret_blkg;
                        break;
                }
                if (pos == blkcg)
                        break;
        }

found:
        spin_unlock_irqrestore(&q->queue_lock, flags);
        return blkg;
}
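
/*
 * Worked example (descriptive): suppose bios start flowing for cgroup
 * /sys/fs/cgroup/a/b on a disk that so far only has the root blkg. The
 * outer loop above runs twice. On the first pass, the inner walk finds
 * that neither "b" nor "a" has a blkg, leaves pos at "a" (the highest
 * ancestor without one, whose parent root does have one) and creates the
 * blkg for "a". The second pass then creates the blkg for "b" and breaks
 * out since pos == blkcg. Should any blkg_create() fail, the closest
 * existing ancestor blkg (ret_blkg) is returned instead.
 */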

static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;
        int i;

        lockdep_assert_held(&blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /*
         * blkg stays on the queue list until blkg_free_workfn(), see details in
         * blkg_free_workfn(), hence this function can be called from
         * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
         * blkg_free_workfn().
         */
        if (hlist_unhashed(&blkg->blkcg_node))
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && blkg->pd[i]->online) {
                        blkg->pd[i]->online = false;
                        if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[i]);
                }
        }

        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock. If it's not pointing to @blkg now, it never
         * will. Hint assignment itself can race safely.
         */
        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        percpu_ref_kill(&blkg->refcnt);
}

static void blkg_destroy_all(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blkcg_gq *blkg;
        int count = BLKG_DESTROY_BATCH_SIZE;
        int i;

restart:
        spin_lock_irq(&q->queue_lock);
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                if (hlist_unhashed(&blkg->blkcg_node))
                        continue;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);

                /*
                 * In order to avoid holding the spin lock for too long,
                 * release it when a batch of blkgs has been destroyed.
                 */
                if (!(--count)) {
                        count = BLKG_DESTROY_BATCH_SIZE;
                        spin_unlock_irq(&q->queue_lock);
                        cond_resched();
                        goto restart;
                }
        }

        /*
         * Mark the policies deactivated: policy offline has been done and
         * the frees are scheduled, so future blkcg_deactivate_policy()
         * calls can be bypassed.
         */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (pol)
                        __clear_bit(pol->plid, q->blkcg_pols);
        }

        q->root_blkg = NULL;
        spin_unlock_irq(&q->queue_lock);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
                             struct cftype *cftype, u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
        int i, cpu;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates. This is a debug feature which shouldn't exist
         * anyway. If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                for_each_possible_cpu(cpu) {
                        struct blkg_iostat_set *bis =
                                per_cpu_ptr(blkg->iostat_cpu, cpu);
                        memset(bis, 0, sizeof(*bis));

                        /* Re-initialize the cleared blkg_iostat_set */
                        u64_stats_init(&bis->sync);
                        bis->blkg = blkg;
                }
                memset(&blkg->iostat, 0, sizeof(blkg->iostat));
                u64_stats_init(&blkg->iostat.sync);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg->pd[i]);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        if (!blkg->q->disk)
                return NULL;
        return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held. If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        u64 total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(&blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
                spin_unlock_irq(&blkg->q->queue_lock);
        }
        rcu_read_unlock();

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * blkg_conf_init - initialize a blkg_conf_ctx
 * @ctx: blkg_conf_ctx to initialize
 * @input: input string
 *
 * Initialize @ctx which can be used to parse blkg config input string @input.
 * Once initialized, @ctx can be used with blkg_conf_open_bdev() and
 * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
 */
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
        *ctx = (struct blkg_conf_ctx){ .input = input };
}
EXPORT_SYMBOL_GPL(blkg_conf_init);

/**
 * blkg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
 * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
 * set to point past the device node prefix.
 *
 * This function may be called multiple times on @ctx and the extra calls become
 * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
 * explicitly if bdev access is needed without resolving the blkcg / policy part
 * of @ctx->input. Returns -errno on error.
 */
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
{
        char *input = ctx->input;
        unsigned int major, minor;
        struct block_device *bdev;
        int key_len;

        if (ctx->bdev)
                return 0;

        if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
                return -EINVAL;

        input += key_len;
        if (!isspace(*input))
                return -EINVAL;
        input = skip_spaces(input);

        bdev = blkdev_get_no_open(MKDEV(major, minor));
        if (!bdev)
                return -ENODEV;
        if (bdev_is_partition(bdev)) {
                blkdev_put_no_open(bdev);
                return -ENODEV;
        }

        mutex_lock(&bdev->bd_queue->rq_qos_mutex);
        if (!disk_live(bdev->bd_disk)) {
                blkdev_put_no_open(bdev);
                mutex_unlock(&bdev->bd_queue->rq_qos_mutex);
                return -ENODEV;
        }

        ctx->body = input;
        ctx->bdev = bdev;
        return 0;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse per-blkg config update from @ctx->input and initialize @ctx
 * accordingly. On success, @ctx->body points to the part of @ctx->input
 * following MAJ:MIN, @ctx->bdev points to the target block device and
 * @ctx->blkg to the blkg being configured.
 *
 * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
 * function returns with queue lock held and must be followed by
 * blkg_conf_exit().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   struct blkg_conf_ctx *ctx)
        __acquires(&bdev->bd_queue->queue_lock)
{
        struct gendisk *disk;
        struct request_queue *q;
        struct blkcg_gq *blkg;
        int ret;

        ret = blkg_conf_open_bdev(ctx);
        if (ret)
                return ret;

        disk = ctx->bdev->bd_disk;
        q = disk->queue;

        /*
         * blkcg_deactivate_policy() requires the queue to be frozen; grab
         * q_usage_counter to prevent racing with blkcg_deactivate_policy().
         */
        ret = blk_queue_enter(q, 0);
        if (ret)
                goto fail;

        spin_lock_irq(&q->queue_lock);

        if (!blkcg_policy_enabled(q, pol)) {
                ret = -EOPNOTSUPP;
                goto fail_unlock;
        }

        blkg = blkg_lookup(blkcg, q);
        if (blkg)
                goto success;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent;
                struct blkcg_gq *new_blkg;

                parent = blkcg_parent(blkcg);
                while (parent && !blkg_lookup(parent, q)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                /* Drop locks to do new blkg allocation with GFP_KERNEL. */
                spin_unlock_irq(&q->queue_lock);

                new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto fail_exit_queue;
                }

                if (radix_tree_preload(GFP_KERNEL)) {
                        blkg_free(new_blkg);
                        ret = -ENOMEM;
                        goto fail_exit_queue;
                }

                spin_lock_irq(&q->queue_lock);

                if (!blkcg_policy_enabled(q, pol)) {
                        blkg_free(new_blkg);
                        ret = -EOPNOTSUPP;
                        goto fail_preloaded;
                }

                blkg = blkg_lookup(pos, q);
                if (blkg) {
                        blkg_free(new_blkg);
                } else {
                        blkg = blkg_create(pos, disk, new_blkg);
                        if (IS_ERR(blkg)) {
                                ret = PTR_ERR(blkg);
                                goto fail_preloaded;
                        }
                }

                radix_tree_preload_end();

                if (pos == blkcg)
                        goto success;
        }
success:
        blk_queue_exit(q);
        ctx->blkg = blkg;
        return 0;

fail_preloaded:
        radix_tree_preload_end();
fail_unlock:
        spin_unlock_irq(&q->queue_lock);
fail_exit_queue:
        blk_queue_exit(q);
fail:
        /*
         * If queue was bypassing, we should retry. Do so after a
         * short msleep(). It isn't strictly necessary but queue
         * can be bypassing for some time and it's always nice to
         * avoid busy looping.
         */
        if (ret == -EBUSY) {
                msleep(10);
                ret = restart_syscall();
        }
        return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_exit - clean up per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Clean up after per-blkg config update. This function must be called on all
 * blkg_conf_ctx's initialized with blkg_conf_init().
 */
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
        __releases(&ctx->bdev->bd_queue->queue_lock)
        __releases(&ctx->bdev->bd_queue->rq_qos_mutex)
{
        if (ctx->blkg) {
                spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
                ctx->blkg = NULL;
        }

        if (ctx->bdev) {
                mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
                blkdev_put_no_open(ctx->bdev);
                ctx->body = NULL;
                ctx->bdev = NULL;
        }
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
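
/*
 * Typical caller pattern (sketch, assuming a policy's cgroup file write
 * handler; my_policy and parse_and_apply() are hypothetical placeholders
 * for the policy-specific parts, and error handling is trimmed):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);
 *	ret = blkg_conf_prep(blkcg, &my_policy, &ctx);
 *	if (!ret) {
 *		// queue_lock is held; ctx.blkg and ctx.body are valid here
 *		ret = parse_and_apply(ctx.blkg, ctx.body);
 *	}
 *	blkg_conf_exit(&ctx);
 *	return ret ?: nbytes;
 */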

static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
        int i;

        for (i = 0; i < BLKG_IOSTAT_NR; i++) {
                dst->bytes[i] = src->bytes[i];
                dst->ios[i] = src->ios[i];
        }
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
        int i;

        for (i = 0; i < BLKG_IOSTAT_NR; i++) {
                dst->bytes[i] += src->bytes[i];
                dst->ios[i] += src->ios[i];
        }
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
        int i;

        for (i = 0; i < BLKG_IOSTAT_NR; i++) {
                dst->bytes[i] -= src->bytes[i];
                dst->ios[i] -= src->ios[i];
        }
}

static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
                                struct blkg_iostat *last)
{
        struct blkg_iostat delta;
        unsigned long flags;

        /* propagate percpu delta to global */
        flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
        blkg_iostat_set(&delta, cur);
        blkg_iostat_sub(&delta, last);
        blkg_iostat_add(&blkg->iostat.cur, &delta);
        blkg_iostat_add(last, &delta);
        u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}

static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
{
        struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
        struct llist_node *lnode;
        struct blkg_iostat_set *bisc, *next_bisc;
        unsigned long flags;

        rcu_read_lock();

        lnode = llist_del_all(lhead);
        if (!lnode)
                goto out;

        /*
         * To cover concurrent parent blkg updates from blkg_release().
         *
         * When flushing from cgroup, cgroup_rstat_lock is always held, so
         * this lock won't cause contention most of the time.
         */
        raw_spin_lock_irqsave(&blkg_stat_lock, flags);

        /*
         * Iterate only the iostat_cpu's queued in the lockless list.
         */
        llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
                struct blkcg_gq *blkg = bisc->blkg;
                struct blkcg_gq *parent = blkg->parent;
                struct blkg_iostat cur;
                unsigned int seq;

                WRITE_ONCE(bisc->lqueued, false);

                /* fetch the current per-cpu values */
                do {
                        seq = u64_stats_fetch_begin(&bisc->sync);
                        blkg_iostat_set(&cur, &bisc->cur);
                } while (u64_stats_fetch_retry(&bisc->sync, seq));

                blkcg_iostat_update(blkg, &cur, &bisc->last);

                /* propagate global delta to parent (unless that's root) */
                if (parent && parent->parent)
                        blkcg_iostat_update(parent, &blkg->iostat.cur,
                                            &blkg->iostat.last);
        }
        raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
        rcu_read_unlock();
}
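
/*
 * Worked example (descriptive): suppose a CPU's bisc->cur.bytes[READ] reads
 * 4096 while bisc->last.bytes[READ] is 1024. blkcg_iostat_update() computes
 * delta = 4096 - 1024 = 3072, adds it to blkg->iostat.cur (the blkg-wide
 * total) and advances bisc->last to 4096 so the same bytes are never
 * counted twice. The parent propagation step then repeats the same
 * cur - last computation one level up using blkg->iostat.{cur,last}.
 */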

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
        /* Root-level stats are sourced from system-wide IO stats */
        if (cgroup_parent(css->cgroup))
                __blkcg_rstat_flush(css_to_blkcg(css), cpu);
}

/*
 * We source root cgroup stats from the system-wide stats to avoid
 * tracking the same information twice and incurring overhead when no
 * cgroups are defined. For that reason, cgroup_rstat_flush in
 * blkcg_print_stat does not actually fill out the iostat in the root
 * cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible. For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
        struct class_dev_iter iter;
        struct device *dev;

        class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
        while ((dev = class_dev_iter_next(&iter))) {
                struct block_device *bdev = dev_to_bdev(dev);
                struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
                struct blkg_iostat tmp;
                int cpu;
                unsigned long flags;

                memset(&tmp, 0, sizeof(tmp));
                for_each_possible_cpu(cpu) {
                        struct disk_stats *cpu_dkstats;

                        cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
                        tmp.ios[BLKG_IOSTAT_READ] +=
                                cpu_dkstats->ios[STAT_READ];
                        tmp.ios[BLKG_IOSTAT_WRITE] +=
                                cpu_dkstats->ios[STAT_WRITE];
                        tmp.ios[BLKG_IOSTAT_DISCARD] +=
                                cpu_dkstats->ios[STAT_DISCARD];
                        /* convert sectors to bytes */
                        tmp.bytes[BLKG_IOSTAT_READ] +=
                                cpu_dkstats->sectors[STAT_READ] << 9;
                        tmp.bytes[BLKG_IOSTAT_WRITE] +=
                                cpu_dkstats->sectors[STAT_WRITE] << 9;
                        tmp.bytes[BLKG_IOSTAT_DISCARD] +=
                                cpu_dkstats->sectors[STAT_DISCARD] << 9;
                }

                flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
                blkg_iostat_set(&blkg->iostat.cur, &tmp);
                u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
        }
}

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
        struct blkg_iostat_set *bis = &blkg->iostat;
        u64 rbytes, wbytes, rios, wios, dbytes, dios;
        const char *dname;
        unsigned seq;
        int i;

        if (!blkg->online)
                return;

        dname = blkg_dev_name(blkg);
        if (!dname)
                return;

        seq_printf(s, "%s ", dname);

        do {
                seq = u64_stats_fetch_begin(&bis->sync);

                rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
                wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
                dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
                rios = bis->cur.ios[BLKG_IOSTAT_READ];
                wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
                dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
        } while (u64_stats_fetch_retry(&bis->sync, seq));

        if (rbytes || wbytes || rios || wios) {
                seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
                           rbytes, wbytes, rios, wios,
                           dbytes, dios);
        }

        if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
                seq_printf(s, " use_delay=%d delay_nsec=%llu",
                           atomic_read(&blkg->use_delay),
                           atomic64_read(&blkg->delay_nsec));
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (!blkg->pd[i] || !pol->pd_stat_fn)
                        continue;

                pol->pd_stat_fn(blkg->pd[i], s);
        }

        seq_puts(s, "\n");
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct blkcg_gq *blkg;

        if (!seq_css(sf)->parent)
                blkcg_fill_root_iostats();
        else
                cgroup_rstat_flush(blkcg->css.cgroup);

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(&blkg->q->queue_lock);
                blkcg_print_one_stat(blkg, sf);
                spin_unlock_irq(&blkg->q->queue_lock);
        }
        rcu_read_unlock();
        return 0;
}
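
/*
 * Example output (values illustrative): with the above, reading io.stat
 * for a cgroup doing IO on sda (8:0) yields one line per device, e.g.:
 *
 *	8:0 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
 *
 * Sector counts are converted with "<< 9", i.e. 512 bytes per sector, so
 * 2850 read sectors become 1459200 rbytes.
 */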

static struct cftype blkcg_files[] = {
        {
                .name = "stat",
                .seq_show = blkcg_print_stat,
        },
        { }     /* terminate */
};

static struct cftype blkcg_legacy_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};

#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
{
        return &css_to_blkcg(css)->cgwb_list;
}
#endif

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts. The blkcg_css_offline() callback is invoked
 *    which offlines writeback. Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg. This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies. The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs. Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue. Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
        might_sleep();

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq,
                                                    blkcg_node);
                struct request_queue *q = blkg->q;

                if (need_resched() || !spin_trylock(&q->queue_lock)) {
                        /*
                         * Given that the system can accumulate a huge number
                         * of blkgs in pathological cases, check to see if we
                         * need to reschedule to avoid a softlockup.
                         */
                        spin_unlock_irq(&blkcg->lock);
                        cond_resched();
                        spin_lock_irq(&blkcg->lock);
                        continue;
                }

                blkg_destroy(blkg);
                spin_unlock(&q->queue_lock);
        }

        spin_unlock_irq(&blkcg->lock);
}

/**
 * blkcg_pin_online - pin online state
 * @blkcg_css: blkcg of interest
 *
 * While pinned, a blkcg is kept online. This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
{
        refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg_css: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
{
        struct blkcg *blkcg = css_to_blkcg(blkcg_css);

        do {
                if (!refcount_dec_and_test(&blkcg->online_pin))
                        break;
                blkcg_destroy_blkgs(blkcg);
                blkcg = blkcg_parent(blkcg);
        } while (blkcg);
}
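
/*
 * Usage note (descriptive): the writeback code is the main consumer of the
 * pin/unpin pair. When a cgroup writeback domain (cgwb) is created it pins
 * the blkcg's online state, and the pin is dropped from the cgwb release
 * path once all writeback has drained, roughly:
 *
 *	cgwb_create()          -> blkcg_pin_online(blkcg_css);
 *	cgwb_release_workfn()  -> blkcg_unpin_online(blkcg_css);
 *
 * together with the base pin taken in blkcg_css_alloc() and dropped in
 * blkcg_css_offline() below.
 */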

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away. Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
        /* this prevents anyone from attaching or migrating to this blkcg */
        wb_blkcg_offline(css);

        /* put the base online pin allowing step 2 to be triggered */
        blkcg_unpin_online(css);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;

        mutex_lock(&blkcg_pol_mutex);

        list_del(&blkcg->all_blkcgs_node);

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkcg->cpd[i])
                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

        mutex_unlock(&blkcg_pol_mutex);

        free_percpu(blkcg->lhead);
        kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct blkcg *blkcg;
        int i;

        mutex_lock(&blkcg_pol_mutex);

        if (!parent_css) {
                blkcg = &blkcg_root;
        } else {
                blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
                if (!blkcg)
                        goto unlock;
        }

        if (init_blkcg_llists(blkcg))
                goto free_blkcg;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkcg_policy_data *cpd;

                /*
                 * If the policy hasn't been attached yet, wait for it
                 * to be attached before doing anything else. Otherwise,
                 * check if the policy requires any specific per-cgroup
                 * data: if it does, allocate and initialize it.
                 */
                if (!pol || !pol->cpd_alloc_fn)
                        continue;

                cpd = pol->cpd_alloc_fn(GFP_KERNEL);
                if (!cpd)
                        goto free_pd_blkcg;

                blkcg->cpd[i] = cpd;
                cpd->blkcg = blkcg;
                cpd->plid = i;
        }

        spin_lock_init(&blkcg->lock);
        refcount_set(&blkcg->online_pin, 1);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

        mutex_unlock(&blkcg_pol_mutex);
        return &blkcg->css;

free_pd_blkcg:
        for (i--; i >= 0; i--)
                if (blkcg->cpd[i])
                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
        free_percpu(blkcg->lhead);
free_blkcg:
        if (blkcg != &blkcg_root)
                kfree(blkcg);
unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ERR_PTR(-ENOMEM);
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
        struct blkcg *parent = blkcg_parent(css_to_blkcg(css));

        /*
         * blkcg_pin_online() is used to delay blkcg offline so that blkgs
         * don't go offline while cgwbs are still active on them. Pin the
         * parent so that offline always happens towards the root.
         */
        if (parent)
                blkcg_pin_online(&parent->css);
        return 0;
}

void blkg_init_queue(struct request_queue *q)
{
        INIT_LIST_HEAD(&q->blkg_list);
        mutex_init(&q->blkcg_mutex);
}

int blkcg_init_disk(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blkcg_gq *new_blkg, *blkg;
        bool preloaded;
        int ret;

        new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        /* Make sure the root blkg exists. */
        /* spin_lock_irq can serve as RCU read-side critical section. */
        spin_lock_irq(&q->queue_lock);
        blkg = blkg_create(&blkcg_root, disk, new_blkg);
        if (IS_ERR(blkg))
                goto err_unlock;
        q->root_blkg = blkg;
        spin_unlock_irq(&q->queue_lock);

        if (preloaded)
                radix_tree_preload_end();

        ret = blk_ioprio_init(disk);
        if (ret)
                goto err_destroy_all;

        return 0;

err_destroy_all:
        blkg_destroy_all(disk);
        return ret;
err_unlock:
        spin_unlock_irq(&q->queue_lock);
        if (preloaded)
                radix_tree_preload_end();
        return PTR_ERR(blkg);
}

void blkcg_exit_disk(struct gendisk *disk)
{
        blkg_destroy_all(disk);
        blk_throtl_exit(disk);
}

static void blkcg_exit(struct task_struct *tsk)
{
        if (tsk->throttle_disk)
                put_disk(tsk->throttle_disk);
        tsk->throttle_disk = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
        .css_alloc = blkcg_css_alloc,
        .css_online = blkcg_css_online,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .css_rstat_flush = blkcg_rstat_flush,
        .dfl_cftypes = blkcg_files,
        .legacy_cftypes = blkcg_legacy_files,
        .legacy_name = "blkio",
        .exit = blkcg_exit,
#ifdef CONFIG_MEMCG
        /*
         * This ensures that, if available, memcg is automatically enabled
         * together on the default hierarchy so that the owner cgroup can
         * be retrieved from writeback pages.
         */
        .depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @disk. Requires %GFP_KERNEL context. @disk goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @disk bypassed, so nobody would be accessing blkgs
 * from IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
        struct request_queue *q = disk->queue;
        struct blkg_policy_data *pd_prealloc = NULL;
        struct blkcg_gq *blkg, *pinned_blkg = NULL;
        int ret;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        if (queue_is_mq(q))
                blk_mq_freeze_queue(q);
retry:
        spin_lock_irq(&q->queue_lock);

        /* blkg_list is pushed at the head, reverse walk to initialize parents first */
        list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
                struct blkg_policy_data *pd;

                if (blkg->pd[pol->plid])
                        continue;

                /* If prealloc matches, use it; otherwise try GFP_NOWAIT */
                if (blkg == pinned_blkg) {
                        pd = pd_prealloc;
                        pd_prealloc = NULL;
                } else {
                        pd = pol->pd_alloc_fn(disk, blkg->blkcg,
                                              GFP_NOWAIT | __GFP_NOWARN);
                }

                if (!pd) {
                        /*
                         * GFP_NOWAIT failed. Free the existing one and
                         * prealloc for @blkg w/ GFP_KERNEL.
                         */
                        if (pinned_blkg)
                                blkg_put(pinned_blkg);
                        blkg_get(blkg);
                        pinned_blkg = blkg;

                        spin_unlock_irq(&q->queue_lock);

                        if (pd_prealloc)
                                pol->pd_free_fn(pd_prealloc);
                        pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
                                                       GFP_KERNEL);
                        if (pd_prealloc)
                                goto retry;
                        else
                                goto enomem;
                }

                spin_lock(&blkg->blkcg->lock);

                pd->blkg = blkg;
                pd->plid = pol->plid;
                blkg->pd[pol->plid] = pd;

                if (pol->pd_init_fn)
                        pol->pd_init_fn(pd);

                if (pol->pd_online_fn)
                        pol->pd_online_fn(pd);
                pd->online = true;

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;

        spin_unlock_irq(&q->queue_lock);
out:
        if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
        if (pinned_blkg)
                blkg_put(pinned_blkg);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
        return ret;

enomem:
        /* alloc failed, take down everything */
        spin_lock_irq(&q->queue_lock);
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;
                struct blkg_policy_data *pd;

                spin_lock(&blkcg->lock);
                pd = blkg->pd[pol->plid];
                if (pd) {
                        if (pd->online && pol->pd_offline_fn)
                                pol->pd_offline_fn(pd);
                        pd->online = false;
                        pol->pd_free_fn(pd);
                        blkg->pd[pol->plid] = NULL;
                }
                spin_unlock(&blkcg->lock);
        }
        spin_unlock_irq(&q->queue_lock);
        ret = -ENOMEM;
        goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
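
/*
 * Note on the allocation dance above (descriptive): under queue_lock only
 * atomic allocations are legal, so each pd is first tried with
 * GFP_NOWAIT | __GFP_NOWARN. On failure the lock is dropped, the blkg is
 * pinned with blkg_get() so it cannot disappear meanwhile, one pd is
 * preallocated with GFP_KERNEL, and the whole list walk restarts at
 * "retry:". Already-initialized blkgs are skipped on the next pass via
 * the blkg->pd[pol->plid] check, so the restart is cheap.
 */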

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @disk. Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct gendisk *disk,
                             const struct blkcg_policy *pol)
{
        struct request_queue *q = disk->queue;
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        if (queue_is_mq(q))
                blk_mq_freeze_queue(q);

        mutex_lock(&q->blkcg_mutex);
        spin_lock_irq(&q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
                spin_unlock(&blkcg->lock);
        }

        spin_unlock_irq(&q->queue_lock);
        mutex_unlock(&q->blkcg_mutex);

        if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

static void blkcg_free_all_cpd(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;

        list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                if (blkcg->cpd[pol->plid]) {
                        pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                        blkcg->cpd[pol->plid] = NULL;
                }
        }
}

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;
        int i, ret;

        mutex_lock(&blkcg_pol_register_mutex);
        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS) {
                pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
                goto err_unlock;
        }

        /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
        if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
            (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
                goto err_unlock;

        /* register @pol */
        pol->plid = i;
        blkcg_policy[pol->plid] = pol;

        /* allocate and install cpd's */
        if (pol->cpd_alloc_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        struct blkcg_policy_data *cpd;

                        cpd = pol->cpd_alloc_fn(GFP_KERNEL);
                        if (!cpd)
                                goto err_free_cpds;

                        blkcg->cpd[pol->plid] = cpd;
                        cpd->blkcg = blkcg;
                        cpd->plid = pol->plid;
                }
        }

        mutex_unlock(&blkcg_pol_mutex);

        /* everything is in place, add intf files for the new policy */
        if (pol->dfl_cftypes)
                WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
                                               pol->dfl_cftypes));
        if (pol->legacy_cftypes)
                WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
                                                  pol->legacy_cftypes));
        mutex_unlock(&blkcg_pol_register_mutex);
        return 0;

err_free_cpds:
        if (pol->cpd_free_fn)
                blkcg_free_all_cpd(pol);

        blkcg_policy[pol->plid] = NULL;
err_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        mutex_unlock(&blkcg_pol_register_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        mutex_lock(&blkcg_pol_register_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->dfl_cftypes)
                cgroup_rm_cftypes(pol->dfl_cftypes);
        if (pol->legacy_cftypes)
                cgroup_rm_cftypes(pol->legacy_cftypes);

        /* remove cpds and unregister */
        mutex_lock(&blkcg_pol_mutex);

        if (pol->cpd_free_fn)
                blkcg_free_all_cpd(pol);

        blkcg_policy[pol->plid] = NULL;

        mutex_unlock(&blkcg_pol_mutex);
out_unlock:
        mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
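
/*
 * Registration sketch (illustrative; names are hypothetical): a minimal
 * policy only needs paired alloc/free hooks plus the register call, e.g.:
 *
 *	static struct blkg_policy_data *ex_pd_alloc(struct gendisk *disk,
 *			struct blkcg *blkcg, gfp_t gfp)
 *	{
 *		return kzalloc_node(sizeof(struct blkg_policy_data), gfp,
 *				    disk->queue->node);
 *	}
 *
 *	static void ex_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(pd);
 *	}
 *
 *	static struct blkcg_policy ex_policy = {
 *		.pd_alloc_fn	= ex_pd_alloc,
 *		.pd_free_fn	= ex_pd_free,
 *	};
 *
 *	// from module init / exit:
 *	blkcg_policy_register(&ex_policy);	// enforces the pairing above
 *	blkcg_policy_unregister(&ex_policy);
 *
 * Real policies additionally supply cftypes and the pd_init/online/offline
 * hooks used throughout this file.
 */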

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay. We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
        u64 old = atomic64_read(&blkg->delay_start);

        /* negative use_delay means no scaling, see blkcg_set_delay() */
        if (atomic_read(&blkg->use_delay) < 0)
                return;

        /*
         * We only want to scale down every second. The idea here is that we
         * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
         * time window. We only want to throttle tasks for recent delay that
         * has occurred, in 1 second time windows since that's the maximum
         * things can be throttled. We save the current delay window in
         * blkg->last_delay so we know what amount is still left to be charged
         * to the blkg from this point onward. blkg->last_use keeps track of
         * the use_delay counter. The idea is if we're unthrottling the blkg we
         * are ok with whatever is happening now, and we can take away more of
         * the accumulated delay as we've already throttled enough that
         * everybody is happy with their IO latencies.
         */
        if (time_before64(old + NSEC_PER_SEC, now) &&
            atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
                u64 cur = atomic64_read(&blkg->delay_nsec);
                u64 sub = min_t(u64, blkg->last_delay, now - old);
                int cur_use = atomic_read(&blkg->use_delay);

                /*
                 * We've been unthrottled, subtract a larger chunk of our
                 * accumulated delay.
                 */
                if (cur_use < blkg->last_use)
                        sub = max_t(u64, sub, blkg->last_delay >> 1);

                /*
                 * This shouldn't happen, but handle it anyway. Our delay_nsec
                 * should only ever be growing except here where we subtract
                 * out min(last_delay, 1 second), but lord knows bugs happen
                 * and I'd rather not end up with negative numbers.
                 */
                if (unlikely(cur < sub)) {
                        atomic64_set(&blkg->delay_nsec, 0);
                        blkg->last_delay = 0;
                } else {
                        atomic64_sub(sub, &blkg->delay_nsec);
                        blkg->last_delay = cur - sub;
                }
                blkg->last_use = cur_use;
        }
}
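
/*
 * Worked example (descriptive): say 2 seconds have passed since delay_start,
 * delay_nsec has grown to 3 * NSEC_PER_SEC and last_delay was NSEC_PER_SEC.
 * Then sub = min(last_delay, now - old) = 1 second, so delay_nsec drops to
 * 2 seconds and last_delay becomes the remaining 2 seconds. If use_delay
 * also dropped since the last scaling (we are being unthrottled), sub is
 * bumped to at least half of last_delay to shed the backlog faster.
 */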

/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay. This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
        unsigned long pflags;
        bool clamp;
        u64 now = blk_time_get_ns();
        u64 exp;
        u64 delay_nsec = 0;
        int tok;

        while (blkg->parent) {
                int use_delay = atomic_read(&blkg->use_delay);

                if (use_delay) {
                        u64 this_delay;

                        blkcg_scale_delay(blkg, now);
                        this_delay = atomic64_read(&blkg->delay_nsec);
                        if (this_delay > delay_nsec) {
                                delay_nsec = this_delay;
                                clamp = use_delay > 0;
                        }
                }
                blkg = blkg->parent;
        }

        if (!delay_nsec)
                return;

        /*
         * Let's not sleep for all eternity if we've amassed a huge delay.
         * Swapping or metadata IO can accumulate 10's of seconds worth of
         * delay, and we want userspace to be able to do _something_ so cap the
         * delays at 0.25s. If there's 10's of seconds worth of delay then the
         * tasks will be delayed for 0.25 second for every syscall. If
         * blkcg_set_delay() was used as indicated by negative use_delay, the
         * caller is responsible for regulating the range.
         */
        if (clamp)
                delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

        if (use_memdelay)
                psi_memstall_enter(&pflags);

        exp = ktime_add_ns(now, delay_nsec);
        tok = io_schedule_prepare();
        do {
                __set_current_state(TASK_KILLABLE);
                if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
                        break;
        } while (!fatal_signal_pending(current));
        io_schedule_finish(tok);

        if (use_memdelay)
                psi_memstall_leave(&pflags);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume(). Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_disk is set and if not this doesn't do
 * anything. This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
        struct gendisk *disk = current->throttle_disk;
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool use_memdelay = current->use_memdelay;

        if (!disk)
                return;

        current->throttle_disk = NULL;
        current->use_memdelay = false;

        rcu_read_lock();
        blkcg = css_to_blkcg(blkcg_css());
        if (!blkcg)
                goto out;
        blkg = blkg_lookup(blkcg, disk->queue);
        if (!blkg)
                goto out;
        if (!blkg_tryget(blkg))
                goto out;
        rcu_read_unlock();

        blkcg_maybe_throttle_blkg(blkg, use_memdelay);
        blkg_put(blkg);
        put_disk(disk);
        return;
out:
        rcu_read_unlock();
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @disk: disk to throttle
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task. We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a block_device at that point. This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall. You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once. If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
{
        if (unlikely(current->flags & PF_KTHREAD))
                return;

        if (current->throttle_disk != disk) {
                if (test_bit(GD_DEAD, &disk->state))
                        return;
                get_device(disk_to_dev(disk));

                if (current->throttle_disk)
                        put_disk(current->throttle_disk);
                current->throttle_disk = disk;
        }

        if (use_memdelay)
                current->use_memdelay = use_memdelay;
        set_notify_resume(current);
}
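
/*
 * Flow note (descriptive): blk-iolatency and blk-iocost are typical
 * callers. When such a controller decides a cgroup owes IO time, it calls
 * blkcg_add_delay() below to charge the debt and blkcg_schedule_throttle()
 * to mark the task; the actual sleep then happens in
 * blkcg_maybe_throttle_current() on the resume-to-user path, where no
 * locks are held and a priority inversion cannot occur.
 */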

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}

/**
 * blkg_tryget_closest - try to get a blkg ref on the closest blkg
 * @bio: target bio
 * @css: target css
 *
 * As the failure mode here is to walk up the blkg tree, this ensures that
 * the blkg->parent pointers are always valid.  This returns the blkg that it
 * ended up taking a reference on, or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
		struct cgroup_subsys_state *css)
{
	struct blkcg_gq *blkg, *ret_blkg = NULL;

	rcu_read_lock();
	blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}
	rcu_read_unlock();

	return ret_blkg;
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  An association failure is handled by walking
 * up the blkg tree.  Therefore, the blkg associated can be anything between
 * the css's blkg and q->root_blkg.  This situation only happens when a
 * cgroup is dying and then the remaining bios will spill to the closest
 * alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	if (bio->bi_blkg)
		blkg_put(bio->bi_blkg);

	if (css && css->parent) {
		bio->bi_blkg = blkg_tryget_closest(bio, css);
	} else {
		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
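
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): pinning a bio to an explicit css instead of the submitter's, the
 * way writeback routes bios via the inode's cgroup.  "target_css" is
 * assumed to be a blkcg css the caller holds a reference on.
 */
#if 0
static void example_associate_explicit(struct bio *bio,
				       struct cgroup_subsys_state *target_css)
{
	/* drops any prior association and takes a ref on the closest blkg */
	bio_associate_blkg_from_css(bio, target_css);
}
#endif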

/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	if (blk_op_is_passthrough(bio->bi_opf))
		return;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = bio_blkcg_css(bio);
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_blkg)
		bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);

static int blk_cgroup_io_type(struct bio *bio)
{
	if (op_is_discard(bio->bi_opf))
		return BLKG_IOSTAT_DISCARD;
	if (op_is_write(bio->bi_opf))
		return BLKG_IOSTAT_WRITE;
	return BLKG_IOSTAT_READ;
}

void blk_cgroup_bio_start(struct bio *bio)
{
	struct blkcg *blkcg = bio->bi_blkg->blkcg;
	int rwd = blk_cgroup_io_type(bio), cpu;
	struct blkg_iostat_set *bis;
	unsigned long flags;

	if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
		return;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(blkcg->css.cgroup))
		return;

	cpu = get_cpu();
	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
	flags = u64_stats_update_begin_irqsave(&bis->sync);

	/*
	 * If the bio is flagged with BIO_CGROUP_ACCT, it is a split bio and
	 * we have already accounted for the size of the bio.
	 */
	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
		bio_set_flag(bio, BIO_CGROUP_ACCT);
		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
	}
	bis->cur.ios[rwd]++;

	/*
	 * If the iostat_cpu isn't in a lockless list, put it into the
	 * list to indicate that a stat update is pending.
	 */
	if (!READ_ONCE(bis->lqueued)) {
		struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);

		llist_add(&bis->lnode, lhead);
		WRITE_ONCE(bis->lqueued, true);
	}

	u64_stats_update_end_irqrestore(&bis->sync, flags);
	cgroup_rstat_updated(blkcg->css.cgroup, cpu);
	put_cpu();
}

bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	for (css = blkcg_css(); css; css = css->parent) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
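
/*
 * Illustrative sketch (not part of the original file):
 * blk_cgroup_congested() pairs with the congestion_count updates made by
 * blkcg_use_delay() and blkcg_clear_delay().  A submitter of optional IO,
 * like the swap-in readahead path, can use it to back off.  The helper
 * below is hypothetical.
 */
#if 0
static bool example_should_do_readahead(void)
{
	/* cheap, RCU-protected walk of the current task's blkcg ancestors */
	return !blk_cgroup_congested();
}
#endif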