// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include "blk.h"
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;

static DEFINE_RAW_SPINLOCK(blkg_stat_lock);

#define BLKG_DESTROY_BATCH_SIZE  64

/*
 * Lockless lists for tracking IO stats update
 *
 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
 * There are multiple blkg's (one for each block device) attached to each
 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
 * but it doesn't know which blkg has the updated stats. If there are many
 * block devices in a system, the cost of iterating all the blkg's to flush
 * out the IO stats can be high. To reduce such overhead, a set of percpu
 * lockless lists (lhead) per blkcg are used to track the set of recently
 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
 * References to blkg are gotten and then put back in the process to
 * protect against blkg removal.
 *
 * Return: 0 if successful or -ENOMEM if allocation fails.
 */
static int init_blkcg_llists(struct blkcg *blkcg)
{
	int cpu;

	blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
	if (!blkcg->lhead)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
	return 0;
}
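
/*
 * Illustrative sketch (not part of this file): the update side described in
 * the comment above roughly queues a per-cpu iostat onto the blkcg's
 * lockless list only once per flush interval.  A simplified version of what
 * blk_cgroup_bio_start() does could look like:
 *
 *	struct blkg_iostat_set *bis = this_cpu_ptr(blkg->iostat_cpu);
 *
 *	if (!READ_ONCE(bis->lqueued)) {
 *		llist_add(&bis->lnode, this_cpu_ptr(blkg->blkcg->lhead));
 *		WRITE_ONCE(bis->lqueued, true);
 *	}
 *
 * The exact locking and ordering live in blk_cgroup_bio_start(); this is
 * only a reading aid.
 */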

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static void blkg_free_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     free_work);
	struct request_queue *q = blkg->q;
	int i;

	/*
	 * pd_free_fn() can also be called from blkcg_deactivate_policy().
	 * In order to make sure pd_free_fn() is called in order, the deletion
	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
	 * blkcg_deactivate_policy().
	 */
	mutex_lock(&q->blkcg_mutex);
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	if (blkg->parent)
		blkg_put(blkg->parent);
	spin_lock_irq(&q->queue_lock);
	list_del_init(&blkg->q_node);
	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	blk_put_queue(q);
	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	if (!blkg)
		return;

	/*
	 * Both ->pd_free_fn() and the request queue's release handler may
	 * sleep, so free the blkg by scheduling a work function.
	 */
	INIT_WORK(&blkg->free_work, blkg_free_workfn);
	schedule_work(&blkg->free_work);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
	struct blkcg *blkcg = blkg->blkcg;
	int cpu;

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif
	/*
	 * Flush all the non-empty percpu lockless lists before releasing
	 * the blkg, given these stats belong to it.
	 *
	 * blkg_stat_lock is for serializing blkg stat updates.
	 */
	for_each_possible_cpu(cpu)
		__blkcg_rstat_flush(blkcg, cpu);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
static struct workqueue_struct *blkcg_punt_bio_wq;

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock(&blkg->async_bio_lock);
	bio_list_merge_init(&bios, &blkg->async_bios);
	spin_unlock(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/*
 * When a shared kthread issues a bio for a cgroup, doing so synchronously can
 * lead to priority inversions as the kthread can be trapped waiting for that
 * cgroup.  Use this helper instead of submit_bio to punt the actual issuing to
 * a dedicated per-blkcg work item to avoid such priority inversions.
 */
void blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	if (blkg->parent) {
		spin_lock(&blkg->async_bio_lock);
		bio_list_add(&blkg->async_bios, bio);
		spin_unlock(&blkg->async_bio_lock);
		queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	} else {
		/* never bounce for the root cgroup */
		submit_bio(bio);
	}
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);

static int __init blkcg_punt_bio_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_punt_bio_init);
#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */

/**
 * bio_blkcg_css - return the blkcg CSS associated with a bio
 * @bio: target bio
 *
 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
 * associated. Callers are expected to either handle %NULL or know association
 * has been done prior to calling this.
 */
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
	if (!bio || !bio->bi_blkg)
		return NULL;
	return &bio->bi_blkg->blkcg->css;
}
EXPORT_SYMBOL_GPL(bio_blkcg_css);

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @disk: gendisk the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @disk.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
	if (!blkg)
		return NULL;
	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto out_free_blkg;
	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto out_exit_refcnt;
	if (!blk_get_queue(disk->queue))
		goto out_free_iostat;

	blkg->q = disk->queue;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->iostat.blkg = blkg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
#endif

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu) {
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
		per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(disk->queue, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
		if (!pd)
			goto out_free_pds;
		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
		pd->online = false;
	}

	return blkg;

out_free_pds:
	while (--i >= 0)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	blk_put_queue(disk->queue);
out_free_iostat:
	free_percpu(blkg->iostat_cpu);
out_exit_refcnt:
	percpu_ref_exit(&blkg->refcnt);
out_free_blkg:
	kfree(blkg);
	return NULL;
}

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	lockdep_assert_held(&disk->queue->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(disk->queue)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &disk->queue->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i]) {
				if (pol->pd_online_fn)
					pol->pd_online_fn(blkg->pd[i]);
				blkg->pd[i]->online = true;
			}
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	if (new_blkg)
		blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @disk: gendisk of interest
 *
 * Lookup blkg for the @blkcg - @disk pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and takes @disk->queue->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
					   struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = blkg_lookup(blkcg, q);
	if (blkg) {
		if (blkcg != &blkcg_root &&
		    blkg != rcu_dereference(blkcg->blkg_hint))
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto found;
	}

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.  Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = blkg_lookup(parent, q);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, disk, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/*
	 * blkg stays on the queue list until blkg_free_workfn(), see details in
	 * blkg_free_workfn(), hence this function can be called from
	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
	 * blkg_free_workfn().
	 */
	if (hlist_unhashed(&blkg->blkcg_node))
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && blkg->pd[i]->online) {
			blkg->pd[i]->online = false;
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[i]);
		}
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

static void blkg_destroy_all(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	int count = BLKG_DESTROY_BATCH_SIZE;
	int i;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		if (hlist_unhashed(&blkg->blkcg_node))
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * in order to avoid holding the spin lock for too long, release
		 * it when a batch of blkgs are destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	/*
	 * Mark policy deactivated since policy offline has been done, and
	 * the free is scheduled, so future blkcg_deactivate_policy() can
	 * be bypassed
	 */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (pol)
			__clear_bit(pol->plid, q->blkcg_pols);
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void __blkg_clear_stat(struct blkg_iostat_set *bis)
{
	struct blkg_iostat cur = {0};
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&bis->sync);
	blkg_iostat_set(&bis->cur, &cur);
	blkg_iostat_set(&bis->last, &cur);
	u64_stats_update_end_irqrestore(&bis->sync, flags);
}

static void blkg_clear_stat(struct blkcg_gq *blkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);

		__blkg_clear_stat(s);
	}
	__blkg_clear_stat(&blkg->iostat);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_clear_stat(blkg);
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	if (!blkg->q->disk)
		return NULL;
	return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
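
/*
 * Illustrative sketch (not part of this file): a policy typically pairs
 * blkcg_print_blkgs() with a prfill callback from its cftype ->seq_show.
 * The names "example_pol", "example_prfill_limit" and "some_limit_from"
 * below are made up for the example; a minimal shape could look like:
 *
 *	static u64 example_prfill_limit(struct seq_file *sf,
 *					struct blkg_policy_data *pd, int off)
 *	{
 *		// print one per-device value and return it for the total
 *		return __blkg_prfill_u64(sf, pd, some_limit_from(pd));
 *	}
 *
 *	static int example_seq_show(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  example_prfill_limit, &example_pol, 0, false);
 *		return 0;
 *	}
 */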

/**
 * blkg_conf_init - initialize a blkg_conf_ctx
 * @ctx: blkg_conf_ctx to initialize
 * @input: input string
 *
 * Initialize @ctx which can be used to parse blkg config input string @input.
 * Once initialized, @ctx can be used with blkg_conf_open_bdev() and
 * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
 */
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
	*ctx = (struct blkg_conf_ctx){ .input = input };
}
EXPORT_SYMBOL_GPL(blkg_conf_init);

/**
 * blkg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
 * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
 * set to point past the device node prefix.
 *
 * This function may be called multiple times on @ctx and the extra calls become
 * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
 * explicitly if bdev access is needed without resolving the blkcg / policy part
 * of @ctx->input. Returns -errno on error.
 */
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
{
	char *input = ctx->input;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	if (ctx->bdev)
		return 0;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	input += key_len;
	if (!isspace(*input))
		return -EINVAL;
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor));
	if (!bdev)
		return -ENODEV;
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return -ENODEV;
	}

	mutex_lock(&bdev->bd_queue->rq_qos_mutex);
	if (!disk_live(bdev->bd_disk)) {
		blkdev_put_no_open(bdev);
		mutex_unlock(&bdev->bd_queue->rq_qos_mutex);
		return -ENODEV;
	}

	ctx->body = input;
	ctx->bdev = bdev;
	return 0;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse per-blkg config update from @ctx->input and initialize @ctx
 * accordingly. On success, @ctx->body points to the part of @ctx->input
 * following MAJ:MIN, @ctx->bdev points to the target block device and
 * @ctx->blkg to the blkg being configured.
 *
 * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
 * function returns with queue lock held and must be followed by
 * blkg_conf_exit().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx)
	__acquires(&bdev->bd_queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	ret = blkg_conf_open_bdev(ctx);
	if (ret)
		return ret;

	disk = ctx->bdev->bd_disk;
	q = disk->queue;

	/*
	 * blkcg_deactivate_policy() requires the queue to be frozen; grab
	 * q_usage_counter to prevent racing with blkcg_deactivate_policy().
	 */
	ret = blk_queue_enter(q, 0);
	if (ret)
		goto fail;

	spin_lock_irq(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol)) {
		ret = -EOPNOTSUPP;
		goto fail_unlock;
	}

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !blkg_lookup(parent, q)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);

		new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit_queue;
		}

		spin_lock_irq(&q->queue_lock);

		if (!blkcg_policy_enabled(q, pol)) {
			blkg_free(new_blkg);
			ret = -EOPNOTSUPP;
			goto fail_preloaded;
		}

		blkg = blkg_lookup(pos, q);
		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, disk, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	blk_queue_exit(q);
	ctx->blkg = blkg;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
fail_exit_queue:
	blk_queue_exit(q);
fail:
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_exit - clean up per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Clean up after per-blkg config update.  This function must be called on all
 * blkg_conf_ctx's initialized with blkg_conf_init().
 */
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock)
	__releases(&ctx->bdev->bd_queue->rq_qos_mutex)
{
	if (ctx->blkg) {
		spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
		ctx->blkg = NULL;
	}

	if (ctx->bdev) {
		mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
		blkdev_put_no_open(ctx->bdev);
		ctx->body = NULL;
		ctx->bdev = NULL;
	}
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
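
/*
 * Illustrative sketch (not part of this file): a policy's cgroup write
 * handler generally drives the three calls above in this order.  The
 * "example_pol" policy and the "parse_and_apply" helper are made up for
 * the example:
 *
 *	static ssize_t example_write(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		blkg_conf_init(&ctx, buf);
 *		ret = blkg_conf_prep(css_to_blkcg(of_css(of)), &example_pol, &ctx);
 *		if (!ret) {
 *			// ctx.blkg and ctx.body are valid here, queue lock held
 *			ret = parse_and_apply(ctx.blkg, ctx.body);
 *		}
 *		blkg_conf_exit(&ctx);
 *		return ret ?: nbytes;
 *	}
 */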

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
				struct blkg_iostat *last)
{
	struct blkg_iostat delta;
	unsigned long flags;

	/* propagate percpu delta to global */
	flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
	blkg_iostat_set(&delta, cur);
	blkg_iostat_sub(&delta, last);
	blkg_iostat_add(&blkg->iostat.cur, &delta);
	blkg_iostat_add(last, &delta);
	u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}

static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
{
	struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
	struct llist_node *lnode;
	struct blkg_iostat_set *bisc, *next_bisc;
	unsigned long flags;

	rcu_read_lock();

	lnode = llist_del_all(lhead);
	if (!lnode)
		goto out;

	/*
	 * Guard against a concurrent parent blkg update from blkg_release().
	 *
	 * When flushing from cgroup, cgroup_rstat_lock is always held, so
	 * this lock won't cause contention most of time.
	 */
	raw_spin_lock_irqsave(&blkg_stat_lock, flags);

	/*
	 * Iterate only the iostat_cpu's queued in the lockless list.
	 */
	llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
		struct blkcg_gq *blkg = bisc->blkg;
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat cur;
		unsigned int seq;

		/*
		 * Order the assignment of `next_bisc` from `bisc->lnode.next`
		 * in llist_for_each_entry_safe before clearing `bisc->lqueued`,
		 * so that `next_bisc` is not assigned a new next pointer added
		 * in blk_cgroup_bio_start() in case of reordering.
		 *
		 * The pair barrier is implied in llist_add() in blk_cgroup_bio_start().
		 */
		smp_mb();

		WRITE_ONCE(bisc->lqueued, false);
		if (bisc == &blkg->iostat)
			goto propagate_up; /* propagate up to parent only */

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		blkcg_iostat_update(blkg, &cur, &bisc->last);

propagate_up:
		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent) {
			blkcg_iostat_update(parent, &blkg->iostat.cur,
					    &blkg->iostat.last);
			/*
			 * Queue parent->iostat to its blkcg's lockless
			 * list to propagate up to the grandparent if the
			 * iostat hasn't been queued yet.
			 */
			if (!parent->iostat.lqueued) {
				struct llist_head *plhead;

				plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
				llist_add(&parent->iostat.lnode, plhead);
				parent->iostat.lqueued = true;
			}
		}
	}
	raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
	rcu_read_unlock();
}

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	/* Root-level stats are sourced from system-wide IO stats */
	if (cgroup_parent(css->cgroup))
		__blkcg_rstat_flush(css_to_blkcg(css), cpu);
}

/*
 * We source root cgroup stats from the system-wide stats to avoid
 * tracking the same information twice and incurring overhead when no
 * cgroups are defined. For that reason, cgroup_rstat_flush in
 * blkcg_print_stat does not actually fill out the iostat in the root
 * cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible. For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct block_device *bdev = dev_to_bdev(dev);
		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
		struct blkg_iostat tmp;
		int cpu;
		unsigned long flags;

		memset(&tmp, 0, sizeof(tmp));
		for_each_possible_cpu(cpu) {
			struct disk_stats *cpu_dkstats;

			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
			tmp.ios[BLKG_IOSTAT_READ] +=
				cpu_dkstats->ios[STAT_READ];
			tmp.ios[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->ios[STAT_WRITE];
			tmp.ios[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->ios[STAT_DISCARD];
			// convert sectors to bytes
			tmp.bytes[BLKG_IOSTAT_READ] +=
				cpu_dkstats->sectors[STAT_READ] << 9;
			tmp.bytes[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->sectors[STAT_WRITE] << 9;
			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->sectors[STAT_DISCARD] << 9;
		}

		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
		blkg_iostat_set(&blkg->iostat.cur, &tmp);
		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
	}
	class_dev_iter_exit(&iter);
}

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
	struct blkg_iostat_set *bis = &blkg->iostat;
	u64 rbytes, wbytes, rios, wios, dbytes, dios;
	const char *dname;
	unsigned seq;
	int i;

	if (!blkg->online)
		return;

	dname = blkg_dev_name(blkg);
	if (!dname)
		return;

	seq_printf(s, "%s ", dname);

	do {
		seq = u64_stats_fetch_begin(&bis->sync);

		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
		rios = bis->cur.ios[BLKG_IOSTAT_READ];
		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	if (rbytes || wbytes || rios || wios) {
		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
			rbytes, wbytes, rios, wios,
			dbytes, dios);
	}

	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
		seq_printf(s, " use_delay=%d delay_nsec=%llu",
			atomic_read(&blkg->use_delay),
			atomic64_read(&blkg->delay_nsec));
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkg->pd[i] || !pol->pd_stat_fn)
			continue;

		pol->pd_stat_fn(blkg->pd[i], s);
	}

	seq_puts(s, "\n");
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		cgroup_rstat_flush(blkcg->css.cgroup);

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		blkcg_print_one_stat(blkg, sf);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
"reset_stats", 1227 .write_u64 = blkcg_reset_stats, 1228 }, 1229 { } /* terminate */ 1230 }; 1231 1232 #ifdef CONFIG_CGROUP_WRITEBACK 1233 struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css) 1234 { 1235 return &css_to_blkcg(css)->cgwb_list; 1236 } 1237 #endif 1238 1239 /* 1240 * blkcg destruction is a three-stage process. 1241 * 1242 * 1. Destruction starts. The blkcg_css_offline() callback is invoked 1243 * which offlines writeback. Here we tie the next stage of blkg destruction 1244 * to the completion of writeback associated with the blkcg. This lets us 1245 * avoid punting potentially large amounts of outstanding writeback to root 1246 * while maintaining any ongoing policies. The next stage is triggered when 1247 * the nr_cgwbs count goes to zero. 1248 * 1249 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called 1250 * and handles the destruction of blkgs. Here the css reference held by 1251 * the blkg is put back eventually allowing blkcg_css_free() to be called. 1252 * This work may occur in cgwb_release_workfn() on the cgwb_release 1253 * workqueue. Any submitted ios that fail to get the blkg ref will be 1254 * punted to the root_blkg. 1255 * 1256 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called. 1257 * This finally frees the blkcg. 1258 */ 1259 1260 /** 1261 * blkcg_destroy_blkgs - responsible for shooting down blkgs 1262 * @blkcg: blkcg of interest 1263 * 1264 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock 1265 * is nested inside q lock, this function performs reverse double lock dancing. 1266 * Destroying the blkgs releases the reference held on the blkcg's css allowing 1267 * blkcg_css_free to eventually be called. 1268 * 1269 * This is the blkcg counterpart of ioc_release_fn(). 1270 */ 1271 static void blkcg_destroy_blkgs(struct blkcg *blkcg) 1272 { 1273 might_sleep(); 1274 1275 spin_lock_irq(&blkcg->lock); 1276 1277 while (!hlist_empty(&blkcg->blkg_list)) { 1278 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, 1279 struct blkcg_gq, blkcg_node); 1280 struct request_queue *q = blkg->q; 1281 1282 if (need_resched() || !spin_trylock(&q->queue_lock)) { 1283 /* 1284 * Given that the system can accumulate a huge number 1285 * of blkgs in pathological cases, check to see if we 1286 * need to rescheduling to avoid softlockup. 1287 */ 1288 spin_unlock_irq(&blkcg->lock); 1289 cond_resched(); 1290 spin_lock_irq(&blkcg->lock); 1291 continue; 1292 } 1293 1294 blkg_destroy(blkg); 1295 spin_unlock(&q->queue_lock); 1296 } 1297 1298 spin_unlock_irq(&blkcg->lock); 1299 } 1300 1301 /** 1302 * blkcg_pin_online - pin online state 1303 * @blkcg_css: blkcg of interest 1304 * 1305 * While pinned, a blkcg is kept online. This is primarily used to 1306 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline 1307 * while an associated cgwb is still active. 1308 */ 1309 void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css) 1310 { 1311 refcount_inc(&css_to_blkcg(blkcg_css)->online_pin); 1312 } 1313 1314 /** 1315 * blkcg_unpin_online - unpin online state 1316 * @blkcg_css: blkcg of interest 1317 * 1318 * This is primarily used to impedance-match blkg and cgwb lifetimes so 1319 * that blkg doesn't go offline while an associated cgwb is still active. 1320 * When this count goes to zero, all active cgwbs have finished so the 1321 * blkcg can continue destruction by calling blkcg_destroy_blkgs(). 
 */
void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
{
	struct blkcg *blkcg = css_to_blkcg(blkcg_css);

	do {
		struct blkcg *parent;

		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;

		parent = blkcg_parent(blkcg);
		blkcg_destroy_blkgs(blkcg);
		blkcg = parent;
	} while (blkcg);
}

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(css);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(css);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	free_percpu(blkcg->lhead);
	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg)
			goto unlock;
	}

	if (init_blkcg_llists(blkcg))
		goto free_blkcg;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd)
			goto free_pd_blkcg;

		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
	free_percpu(blkcg->lhead);
free_blkcg:
	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ERR_PTR(-ENOMEM);
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
	struct blkcg *parent = blkcg_parent(css_to_blkcg(css));

	/*
	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
	 * don't go offline while cgwbs are still active on them.  Pin the
	 * parent so that offline always happens towards the root.
	 */
	if (parent)
		blkcg_pin_online(&parent->css);
	return 0;
}

void blkg_init_queue(struct request_queue *q)
{
	INIT_LIST_HEAD(&q->blkg_list);
	mutex_init(&q->blkcg_mutex);
}

int blkcg_init_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;

	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	/* spin_lock_irq can serve as RCU read-side critical section. */
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, disk, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);

	if (preloaded)
		radix_tree_preload_end();

	return 0;

err_unlock:
	spin_unlock_irq(&q->queue_lock);
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

void blkcg_exit_disk(struct gendisk *disk)
{
	blkg_destroy_all(disk);
	blk_throtl_exit(disk);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_disk)
		put_disk(tsk->throttle_disk);
	tsk->throttle_disk = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_online = blkcg_css_online,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @disk.  Requires %GFP_KERNEL context.  @disk goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @disk bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	unsigned int memflags;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/*
	 * A policy is allowed to be registered without pd_alloc_fn/pd_free_fn,
	 * for example, ioprio.  Such a policy works at the blkcg level, not the
	 * disk level, and doesn't need to be activated.
	 */
	if (WARN_ON_ONCE(!pol->pd_alloc_fn || !pol->pd_free_fn))
		return -EINVAL;

	if (queue_is_mq(q))
		memflags = blk_mq_freeze_queue(q);
retry:
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to initialize parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(disk, blkg->blkcg,
					      GFP_NOWAIT | __GFP_NOWARN);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed.  Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
						       GFP_KERNEL);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		spin_lock(&blkg->blkcg->lock);

		pd->blkg = blkg;
		pd->plid = pol->plid;
		blkg->pd[pol->plid] = pd;

		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);

		if (pol->pd_online_fn)
			pol->pd_online_fn(pd);
		pd->online = true;

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q, memflags);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, take down everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;
		struct blkg_policy_data *pd;

		spin_lock(&blkcg->lock);
		pd = blkg->pd[pol->plid];
		if (pd) {
			if (pd->online && pol->pd_offline_fn)
				pol->pd_offline_fn(pd);
			pd->online = false;
			pol->pd_free_fn(pd);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
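
/*
 * Illustrative sketch (not part of this file): a disk-level policy usually
 * activates itself when it is set up for a queue and deactivates on
 * teardown.  The "example_pol" policy and the init/exit hooks below are
 * made up for the example:
 *
 *	static int example_init(struct gendisk *disk)
 *	{
 *		// populates every existing blkg with this policy's pd
 *		return blkcg_activate_policy(disk, &example_pol);
 *	}
 *
 *	static void example_exit(struct gendisk *disk)
 *	{
 *		blkcg_deactivate_policy(disk, &example_pol);
 *	}
 */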

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @disk.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned int memflags;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		memflags = blk_mq_freeze_queue(q);

	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q, memflags);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

static void blkcg_free_all_cpd(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
		if (blkcg->cpd[pol->plid]) {
			pol->cpd_free_fn(blkcg->cpd[pol->plid]);
			blkcg->cpd[pol->plid] = NULL;
		}
	}
}

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		goto err_unlock;
	}

	/*
	 * Make sure cpd_alloc_fn/cpd_free_fn and pd_alloc_fn/pd_free_fn come
	 * in pairs, and that a policy without pd_alloc_fn/pd_free_fn can't be
	 * activated.
	 */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
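
/*
 * Illustrative sketch (not part of this file): a policy is normally
 * registered once at boot or module load and unregistered on unload.
 * The "example_pol" policy and its callbacks are made up for the example:
 *
 *	static struct blkcg_policy example_pol = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_pol);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&example_pol);
 *	}
 *	module_init(example_init);
 *	module_exit(example_exit);
 */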

/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/* negative use_delay means no scaling, see blkcg_set_delay() */
	if (atomic_read(&blkg->use_delay) < 0)
		return;

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}

/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	bool clamp;
	u64 now = blk_time_get_ns();
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		int use_delay = atomic_read(&blkg->use_delay);

		if (use_delay) {
			u64 this_delay;

			blkcg_scale_delay(blkg, now);
			this_delay = atomic64_read(&blkg->delay_nsec);
			if (this_delay > delay_nsec) {
				delay_nsec = this_delay;
				clamp = use_delay > 0;
			}
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap the
	 * delays at 0.25s.  If there's 10's of seconds worth of delay then the
	 * tasks will be delayed for 0.25 second for every syscall.  If
	 * blkcg_set_delay() was used as indicated by negative use_delay, the
	 * caller is responsible for regulating the range.
	 */
	if (clamp)
		delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	if (use_memdelay)
		psi_memstall_enter(&pflags);

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);

	if (use_memdelay)
		psi_memstall_leave(&pflags);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_disk is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is set up for throttling.
/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume(). Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_disk is set and if not this doesn't do
 * anything. This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct gendisk *disk = current->throttle_disk;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!disk)
		return;

	current->throttle_disk = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	blkcg = css_to_blkcg(blkcg_css());
	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, disk->queue);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	put_disk(disk);
	return;
out:
	rcu_read_unlock();
	/* drop the disk ref taken in blkcg_schedule_throttle() */
	put_disk(disk);
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @disk: disk to throttle
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task. We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a block_device at that point. This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall. You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once. If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (current->throttle_disk != disk) {
		if (test_bit(GD_DEAD, &disk->state))
			return;
		get_device(disk_to_dev(disk));

		if (current->throttle_disk)
			put_disk(current->throttle_disk);
		current->throttle_disk = disk;
	}

	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}

/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation. This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
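/*
 * Example (an illustrative sketch): an IO controller that measured "delta"
 * nanoseconds of excess latency on "blkg" would typically charge it and
 * then mark the offending task, roughly:
 *
 *	blkcg_add_delay(blkg, blk_time_get_ns(), delta);
 *	blkcg_schedule_throttle(disk, false);
 *
 * where "disk" is the gendisk the controller is attached to. The actual
 * sleep then happens in blkcg_maybe_throttle_current() when the task
 * returns to user space.
 */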
/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @bio: target bio
 * @css: target css
 *
 * As the failure mode here is to walk up the blkg tree, this ensures that the
 * blkg->parent pointers are always valid. This returns the blkg that it ended
 * up taking a reference on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
		struct cgroup_subsys_state *css)
{
	struct blkcg_gq *blkg, *ret_blkg = NULL;

	rcu_read_lock();
	blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}
	rcu_read_unlock();

	return ret_blkg;
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio. An association failure is handled by walking up
 * the blkg tree. Therefore, the blkg associated can be anything between @blkg
 * and q->root_blkg. This situation only happens when a cgroup is dying and
 * then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	if (bio->bi_blkg)
		blkg_put(bio->bi_blkg);

	if (css && css->parent) {
		bio->bi_blkg = blkg_tryget_closest(bio, css);
	} else {
		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
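/*
 * Example (an illustrative sketch): a submitter issuing IO on behalf of a
 * cgroup other than the current task's can pin that cgroup's css and
 * associate the bio explicitly ("target_css" is hypothetical):
 *
 *	if (css_tryget_online(target_css)) {
 *		bio_associate_blkg_from_css(bio, target_css);
 *		css_put(target_css);
 *	}
 *
 * The blkg reference taken by the association is dropped when the bio is
 * freed, so the caller's css reference only needs to cover the call itself.
 */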
/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	if (blk_op_is_passthrough(bio->bi_opf))
		return;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = bio_blkcg_css(bio);
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_blkg)
		bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);

static int blk_cgroup_io_type(struct bio *bio)
{
	if (op_is_discard(bio->bi_opf))
		return BLKG_IOSTAT_DISCARD;
	if (op_is_write(bio->bi_opf))
		return BLKG_IOSTAT_WRITE;
	return BLKG_IOSTAT_READ;
}

void blk_cgroup_bio_start(struct bio *bio)
{
	struct blkcg *blkcg = bio->bi_blkg->blkcg;
	int rwd = blk_cgroup_io_type(bio), cpu;
	struct blkg_iostat_set *bis;
	unsigned long flags;

	if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
		return;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(blkcg->css.cgroup))
		return;

	cpu = get_cpu();
	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
	flags = u64_stats_update_begin_irqsave(&bis->sync);

	/*
	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
	 * bio and we would have already accounted for the size of the bio.
	 */
	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
		bio_set_flag(bio, BIO_CGROUP_ACCT);
		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
	}
	bis->cur.ios[rwd]++;

	/*
	 * If the iostat_cpu isn't in a lockless list, put it into the
	 * list to indicate that a stat update is pending.
	 */
	if (!READ_ONCE(bis->lqueued)) {
		struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);

		llist_add(&bis->lnode, lhead);
		WRITE_ONCE(bis->lqueued, true);
	}

	u64_stats_update_end_irqrestore(&bis->sync, flags);
	cgroup_rstat_updated(blkcg->css.cgroup, cpu);
	put_cpu();
}

bool blk_cgroup_congested(void)
{
	struct blkcg *blkcg;
	bool ret = false;

	rcu_read_lock();
	for (blkcg = css_to_blkcg(blkcg_css()); blkcg;
	     blkcg = blkcg_parent(blkcg)) {
		if (atomic_read(&blkcg->congestion_count)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
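/*
 * Example (an illustrative sketch): callers outside the block layer can use
 * blk_cgroup_congested() to back off IO that is merely speculative, e.g.
 * skipping readahead-style work while the current task's hierarchy is
 * congested:
 *
 *	if (blk_cgroup_congested())
 *		return;
 */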