// SPDX-License-Identifier: GPL-2.0
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/resume_user_mode.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include "blk.h"
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"

static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu);

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals. Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
EXPORT_SYMBOL_GPL(blkcg_root_css);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

bool blkcg_debug_stats = false;

static DEFINE_RAW_SPINLOCK(blkg_stat_lock);

#define BLKG_DESTROY_BATCH_SIZE  64

/*
 * Lockless lists for tracking IO stats update
 *
 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
 * There are multiple blkg's (one for each block device) attached to each
 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
 * but it doesn't know which blkg has the updated stats. If there are many
 * block devices in a system, the cost of iterating all the blkg's to flush
 * out the IO stats can be high. To reduce such overhead, a set of percpu
 * lockless lists (lhead) per blkcg are used to track the set of recently
 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
 * References to blkg are gotten and then put back in the process to
 * protect against blkg removal.
 *
 * Return: 0 if successful or -ENOMEM if allocation fails.
 */
static int init_blkcg_llists(struct blkcg *blkcg)
{
	int cpu;

	blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
	if (!blkcg->lhead)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
	return 0;
}

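/*
 * A minimal sketch of the update-side enqueue described above, assuming
 * the field names used in this file; the real logic, including the stat
 * update itself and blkg reference handling, lives in
 * blk_cgroup_bio_start():
 *
 *	struct blkg_iostat_set *bis = this_cpu_ptr(blkg->iostat_cpu);
 *	struct llist_head *lhead = this_cpu_ptr(blkg->blkcg->lhead);
 *
 *	// enqueue this cpu's iostat only once between flushes
 *	if (!READ_ONCE(bis->lqueued)) {
 *		llist_add(&bis->lnode, lhead);
 *		WRITE_ONCE(bis->lqueued, true);
 *	}
 */
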
/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static void blkg_free_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     free_work);
	struct request_queue *q = blkg->q;
	int i;

	/*
	 * pd_free_fn() can also be called from blkcg_deactivate_policy().
	 * In order to make sure pd_free_fn() is called in order, the deletion
	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
	 * blkcg_deactivate_policy().
	 */
	mutex_lock(&q->blkcg_mutex);
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	if (blkg->parent)
		blkg_put(blkg->parent);
	spin_lock_irq(&q->queue_lock);
	list_del_init(&blkg->q_node);
	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	blk_put_queue(q);
	free_percpu(blkg->iostat_cpu);
	percpu_ref_exit(&blkg->refcnt);
	kfree(blkg);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	if (!blkg)
		return;

	/*
	 * Both ->pd_free_fn() and the request queue's release handler may
	 * sleep, so free us by scheduling one work func.
	 */
	INIT_WORK(&blkg->free_work, blkg_free_workfn);
	schedule_work(&blkg->free_work);
}

static void __blkg_release(struct rcu_head *rcu)
{
	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
	struct blkcg *blkcg = blkg->blkcg;
	int cpu;

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif
	/*
	 * Flush all the non-empty percpu lockless lists before releasing
	 * us, given these stats belong to us.
	 *
	 * blkg_stat_lock is for serializing blkg stat update.
	 */
	for_each_possible_cpu(cpu)
		__blkcg_rstat_flush(blkcg, cpu);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	blkg_free(blkg);
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid. For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
static void blkg_release(struct percpu_ref *ref)
{
	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

	call_rcu(&blkg->rcu_head, __blkg_release);
}

#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
static struct workqueue_struct *blkcg_punt_bio_wq;

static void blkg_async_bio_workfn(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;
	struct blk_plug plug;
	bool need_plug = false;

	/* as long as there are pending bios, @blkg can't go away */
	spin_lock(&blkg->async_bio_lock);
	bio_list_merge_init(&bios, &blkg->async_bios);
	spin_unlock(&blkg->async_bio_lock);

	/* start plug only when bio_list contains at least 2 bios */
	if (bios.head && bios.head->bi_next) {
		need_plug = true;
		blk_start_plug(&plug);
	}
	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
	if (need_plug)
		blk_finish_plug(&plug);
}

/*
 * When a shared kthread issues a bio for a cgroup, doing so synchronously can
 * lead to priority inversions as the kthread can be trapped waiting for that
 * cgroup. Use this helper instead of submit_bio to punt the actual issuing to
 * a dedicated per-blkcg work item to avoid such priority inversions.
 */
void blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	if (blkg->parent) {
		spin_lock(&blkg->async_bio_lock);
		bio_list_add(&blkg->async_bios, bio);
		spin_unlock(&blkg->async_bio_lock);
		queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	} else {
		/* never bounce for the root cgroup */
		submit_bio(bio);
	}
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);

static int __init blkcg_punt_bio_init(void)
{
	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
					    WQ_UNBOUND | WQ_SYSFS, 0);
	if (!blkcg_punt_bio_wq)
		return -ENOMEM;
	return 0;
}
subsys_initcall(blkcg_punt_bio_init);
#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */

/**
 * bio_blkcg_css - return the blkcg CSS associated with a bio
 * @bio: target bio
 *
 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
 * associated. Callers are expected to either handle %NULL or know association
 * has been done prior to calling this.
 */
struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
	if (!bio || !bio->bi_blkg)
		return NULL;
	return &bio->bi_blkg->blkcg->css;
}
EXPORT_SYMBOL_GPL(bio_blkcg_css);

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @disk: gendisk the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @disk.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i, cpu;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
	if (!blkg)
		return NULL;
	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
		goto out_free_blkg;
	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
	if (!blkg->iostat_cpu)
		goto out_exit_refcnt;
	if (!blk_get_queue(disk->queue))
		goto out_free_iostat;

	blkg->q = disk->queue;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->iostat.blkg = blkg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
#endif

	u64_stats_init(&blkg->iostat.sync);
	for_each_possible_cpu(cpu) {
		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
		per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(disk->queue, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
		if (!pd)
			goto out_free_pds;
		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
		pd->online = false;
	}

	return blkg;

out_free_pds:
	while (--i >= 0)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	blk_put_queue(disk->queue);
out_free_iostat:
	free_percpu(blkg->iostat_cpu);
out_exit_refcnt:
	percpu_ref_exit(&blkg->refcnt);
out_free_blkg:
	kfree(blkg);
	return NULL;
}

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	lockdep_assert_held(&disk->queue->queue_lock);

	/* request_queue is dying, do not create/recreate a blkg */
	if (blk_queue_dying(disk->queue)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &disk->queue->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i]) {
				if (pol->pd_online_fn)
					pol->pd_online_fn(blkg->pd[i]);
				blkg->pd[i]->online = true;
			}
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	if (new_blkg)
		blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @disk: gendisk of interest
 *
 * Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
 * create one. blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg. This function
 * should be called under RCU read lock and takes @disk->queue->queue_lock.
 *
 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 * down from root.
 */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
					   struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned long flags;

	WARN_ON_ONCE(!rcu_read_lock_held());

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	spin_lock_irqsave(&q->queue_lock, flags);
	blkg = blkg_lookup(blkcg, q);
	if (blkg) {
		if (blkcg != &blkcg_root &&
		    blkg != rcu_dereference(blkcg->blkg_hint))
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto found;
	}

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents. Returns the closest
	 * blkg to the intended blkg should blkg_create() fail.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = blkg_lookup(parent, q);
			if (blkg) {
				/* remember closest blkg */
				ret_blkg = blkg;
				break;
			}
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, disk, NULL);
		if (IS_ERR(blkg)) {
			blkg = ret_blkg;
			break;
		}
		if (pos == blkcg)
			break;
	}

found:
	spin_unlock_irqrestore(&q->queue_lock, flags);
	return blkg;
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/*
	 * blkg stays on the queue list until blkg_free_workfn(), see details in
	 * blkg_free_workfn(), hence this function can be called from
	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
	 * blkg_free_workfn().
	 */
	if (hlist_unhashed(&blkg->blkcg_node))
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && blkg->pd[i]->online) {
			blkg->pd[i]->online = false;
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[i]);
		}
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock. If it's not pointing to @blkg now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	percpu_ref_kill(&blkg->refcnt);
}

static void blkg_destroy_all(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	int count = BLKG_DESTROY_BATCH_SIZE;
	int i;

restart:
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		if (hlist_unhashed(&blkg->blkcg_node))
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);

		/*
		 * in order to avoid holding the spin lock for too long, release
		 * it when a batch of blkgs are destroyed.
		 */
		if (!(--count)) {
			count = BLKG_DESTROY_BATCH_SIZE;
			spin_unlock_irq(&q->queue_lock);
			cond_resched();
			goto restart;
		}
	}

	/*
	 * Mark policy deactivated since policy offline has been done, and
	 * the free is scheduled, so future blkcg_deactivate_policy() can
	 * be bypassed.
	 */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (pol)
			__clear_bit(pol->plid, q->blkcg_pols);
	}

	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] = src->bytes[i];
		dst->ios[i] = src->ios[i];
	}
}

static void __blkg_clear_stat(struct blkg_iostat_set *bis)
{
	struct blkg_iostat cur = {0};
	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&bis->sync);
	blkg_iostat_set(&bis->cur, &cur);
	blkg_iostat_set(&bis->last, &cur);
	u64_stats_update_end_irqrestore(&bis->sync, flags);
}

static void blkg_clear_stat(struct blkcg_gq *blkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);

		__blkg_clear_stat(s);
	}
	__blkg_clear_stat(&blkg->iostat);
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	pr_info_once("blkio.%s is deprecated\n", cftype->name);
	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_clear_stat(blkg);
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	if (!blkg->q->disk)
		return NULL;
	return bdi_dev_name(blkg->q->disk->bdi);
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held. If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

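/*
 * A minimal sketch of how a policy can pair the two helpers above to
 * implement a cftype seq_show method; the policy, the pd-to-value lookup
 * and the function names here are hypothetical:
 *
 *	static u64 example_prfill(struct seq_file *sf,
 *				  struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd, example_pd_to_val(pd, off));
 *	}
 *
 *	static int example_print(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  example_prfill, &example_policy, 0, true);
 *		return 0;
 *	}
 */
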
/**
 * blkg_conf_init - initialize a blkg_conf_ctx
 * @ctx: blkg_conf_ctx to initialize
 * @input: input string
 *
 * Initialize @ctx which can be used to parse blkg config input string @input.
 * Once initialized, @ctx can be used with blkg_conf_open_bdev() and
 * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
 */
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
	*ctx = (struct blkg_conf_ctx){ .input = input };
}
EXPORT_SYMBOL_GPL(blkg_conf_init);

/**
 * blkg_conf_open_bdev - parse and open bdev for per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
 * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
 * set to point past the device node prefix.
 *
 * This function may be called multiple times on @ctx and the extra calls become
 * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
 * explicitly if bdev access is needed without resolving the blkcg / policy part
 * of @ctx->input. Returns -errno on error.
 */
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
{
	char *input = ctx->input;
	unsigned int major, minor;
	struct block_device *bdev;
	int key_len;

	if (ctx->bdev)
		return 0;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	input += key_len;
	if (!isspace(*input))
		return -EINVAL;
	input = skip_spaces(input);

	bdev = blkdev_get_no_open(MKDEV(major, minor), false);
	if (!bdev)
		return -ENODEV;
	if (bdev_is_partition(bdev)) {
		blkdev_put_no_open(bdev);
		return -ENODEV;
	}

	mutex_lock(&bdev->bd_queue->rq_qos_mutex);
	if (!disk_live(bdev->bd_disk)) {
		blkdev_put_no_open(bdev);
		mutex_unlock(&bdev->bd_queue->rq_qos_mutex);
		return -ENODEV;
	}

	ctx->body = input;
	ctx->bdev = bdev;
	return 0;
}
/*
 * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
 * acquires q->elevator_lock, and ensures the correct locking order
 * between q->elevator_lock and q->rq_qos_mutex.
 *
 * This function returns negative error on failure. On success it returns
 * memflags which must be saved and later passed to blkg_conf_exit_frozen
 * for restoring the memalloc scope.
 */
unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
{
	int ret;
	unsigned long memflags;

	if (ctx->bdev)
		return -EINVAL;

	ret = blkg_conf_open_bdev(ctx);
	if (ret < 0)
		return ret;
	/*
	 * At this point, we haven't started protecting anything related to QoS,
	 * so we release q->rq_qos_mutex here, which was first acquired in
	 * blkg_conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after
	 * freezing the queue and acquiring q->elevator_lock to maintain the
	 * correct locking order.
	 */
	mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);

	memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
	mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
	mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);

	return memflags;
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Parse per-blkg config update from @ctx->input and initialize @ctx
 * accordingly. On success, @ctx->body points to the part of @ctx->input
 * following MAJ:MIN, @ctx->bdev points to the target block device and
 * @ctx->blkg to the blkg being configured.
 *
 * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
 * function returns with queue lock held and must be followed by
 * blkg_conf_exit().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx)
	__acquires(&bdev->bd_queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	int ret;

	ret = blkg_conf_open_bdev(ctx);
	if (ret)
		return ret;

	disk = ctx->bdev->bd_disk;
	q = disk->queue;

	/* Prevent concurrency with blkcg_deactivate_policy() */
	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

	if (!blkcg_policy_enabled(q, pol)) {
		ret = -EOPNOTSUPP;
		goto fail_unlock;
	}

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !blkg_lookup(parent, q)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(&q->queue_lock);

		new_blkg = blkg_alloc(pos, disk, GFP_NOIO);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail_exit;
		}

		if (radix_tree_preload(GFP_KERNEL)) {
			blkg_free(new_blkg);
			ret = -ENOMEM;
			goto fail_exit;
		}

		spin_lock_irq(&q->queue_lock);

		if (!blkcg_policy_enabled(q, pol)) {
			blkg_free(new_blkg);
			ret = -EOPNOTSUPP;
			goto fail_preloaded;
		}

		blkg = blkg_lookup(pos, q);
		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, disk, new_blkg);
			if (IS_ERR(blkg)) {
				ret = PTR_ERR(blkg);
				goto fail_preloaded;
			}
		}

		radix_tree_preload_end();

		if (pos == blkcg)
			goto success;
	}
success:
	mutex_unlock(&q->blkcg_mutex);
	ctx->blkg = blkg;
	return 0;

fail_preloaded:
	radix_tree_preload_end();
fail_unlock:
	spin_unlock_irq(&q->queue_lock);
fail_exit:
	mutex_unlock(&q->blkcg_mutex);
	/*
	 * If queue was bypassing, we should retry. Do so after a
	 * short msleep(). It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_exit - clean up per-blkg config update
 * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
 *
 * Clean up after per-blkg config update. This function must be called on all
 * blkg_conf_ctx's initialized with blkg_conf_init().
 */
void blkg_conf_exit(struct blkg_conf_ctx *ctx)
	__releases(&ctx->bdev->bd_queue->queue_lock)
	__releases(&ctx->bdev->bd_queue->rq_qos_mutex)
{
	if (ctx->blkg) {
		spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
		ctx->blkg = NULL;
	}

	if (ctx->bdev) {
		mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
		blkdev_put_no_open(ctx->bdev);
		ctx->body = NULL;
		ctx->bdev = NULL;
	}
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);

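/*
 * A minimal sketch of the intended blkg_conf_ctx lifecycle, e.g. inside a
 * policy's cftype write callback; the policy and the parsing of ctx.body
 * are hypothetical:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);
 *	ret = blkg_conf_prep(blkcg, &example_policy, &ctx);
 *	if (!ret) {
 *		// parse ctx.body and apply the result to ctx.blkg, e.g. via
 *		// ctx.blkg->pd[example_policy.plid]
 *	}
 *	blkg_conf_exit(&ctx);
 *	return ret ?: nbytes;
 */
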
/*
 * Similar to blkg_conf_exit, but also unfreezes the queue and releases
 * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
 * is used to open the bdev.
 */
void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
{
	if (ctx->bdev) {
		struct request_queue *q = ctx->bdev->bd_queue;

		blkg_conf_exit(ctx);
		mutex_unlock(&q->elevator_lock);
		blk_mq_unfreeze_queue(q, memflags);
	}
}

static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] += src->bytes[i];
		dst->ios[i] += src->ios[i];
	}
}

static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
{
	int i;

	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
		dst->bytes[i] -= src->bytes[i];
		dst->ios[i] -= src->ios[i];
	}
}

static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
				struct blkg_iostat *last)
{
	struct blkg_iostat delta;
	unsigned long flags;

	/* propagate percpu delta to global */
	flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
	blkg_iostat_set(&delta, cur);
	blkg_iostat_sub(&delta, last);
	blkg_iostat_add(&blkg->iostat.cur, &delta);
	blkg_iostat_add(last, &delta);
	u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
}

static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
{
	struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
	struct llist_node *lnode;
	struct blkg_iostat_set *bisc, *next_bisc;
	unsigned long flags;

	rcu_read_lock();

	lnode = llist_del_all(lhead);
	if (!lnode)
		goto out;

	/*
	 * For covering concurrent parent blkg update from blkg_release().
	 *
	 * When flushing from cgroup, the subsystem rstat lock is always held,
	 * so this lock won't cause contention most of the time.
	 */
	raw_spin_lock_irqsave(&blkg_stat_lock, flags);

	/*
	 * Iterate only the iostat_cpu's queued in the lockless list.
	 */
	llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
		struct blkcg_gq *blkg = bisc->blkg;
		struct blkcg_gq *parent = blkg->parent;
		struct blkg_iostat cur;
		unsigned int seq;

		/*
		 * Order the assignment of `next_bisc` from `bisc->lnode.next`
		 * in llist_for_each_entry_safe before clearing `bisc->lqueued`,
		 * to avoid `next_bisc` being assigned a new next pointer added
		 * in blk_cgroup_bio_start() in case of re-ordering.
		 *
		 * The pairing barrier is implied in llist_add() in
		 * blk_cgroup_bio_start().
		 */
		smp_mb();

		WRITE_ONCE(bisc->lqueued, false);
		if (bisc == &blkg->iostat)
			goto propagate_up; /* propagate up to parent only */

		/* fetch the current per-cpu values */
		do {
			seq = u64_stats_fetch_begin(&bisc->sync);
			blkg_iostat_set(&cur, &bisc->cur);
		} while (u64_stats_fetch_retry(&bisc->sync, seq));

		blkcg_iostat_update(blkg, &cur, &bisc->last);

propagate_up:
		/* propagate global delta to parent (unless that's root) */
		if (parent && parent->parent) {
			blkcg_iostat_update(parent, &blkg->iostat.cur,
					    &blkg->iostat.last);
			/*
			 * Queue parent->iostat to its blkcg's lockless
			 * list to propagate up to the grandparent if the
			 * iostat hasn't been queued yet.
			 */
			if (!parent->iostat.lqueued) {
				struct llist_head *plhead;

				plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
				llist_add(&parent->iostat.lnode, plhead);
				parent->iostat.lqueued = true;
			}
		}
	}
	raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
	rcu_read_unlock();
}

static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
	/* Root-level stats are sourced from system-wide IO stats */
	if (cgroup_parent(css->cgroup))
		__blkcg_rstat_flush(css_to_blkcg(css), cpu);
}

/*
 * We source root cgroup stats from the system-wide stats to avoid
 * tracking the same information twice and incurring overhead when no
 * cgroups are defined. For that reason, css_rstat_flush in
 * blkcg_print_stat does not actually fill out the iostat in the root
 * cgroup's blkcg_gq.
 *
 * However, we would like to re-use the printing code between the root and
 * non-root cgroups to the extent possible. For that reason, we simulate
 * flushing the root cgroup's stats by explicitly filling in the iostat
 * with disk level statistics.
 */
static void blkcg_fill_root_iostats(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct block_device *bdev = dev_to_bdev(dev);
		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
		struct blkg_iostat tmp;
		int cpu;
		unsigned long flags;

		memset(&tmp, 0, sizeof(tmp));
		for_each_possible_cpu(cpu) {
			struct disk_stats *cpu_dkstats;

			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
			tmp.ios[BLKG_IOSTAT_READ] +=
				cpu_dkstats->ios[STAT_READ];
			tmp.ios[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->ios[STAT_WRITE];
			tmp.ios[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->ios[STAT_DISCARD];
			// convert sectors to bytes
			tmp.bytes[BLKG_IOSTAT_READ] +=
				cpu_dkstats->sectors[STAT_READ] << 9;
			tmp.bytes[BLKG_IOSTAT_WRITE] +=
				cpu_dkstats->sectors[STAT_WRITE] << 9;
			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
				cpu_dkstats->sectors[STAT_DISCARD] << 9;
		}

		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
		blkg_iostat_set(&blkg->iostat.cur, &tmp);
		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
	}
	class_dev_iter_exit(&iter);
}

static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
{
	struct blkg_iostat_set *bis = &blkg->iostat;
	u64 rbytes, wbytes, rios, wios, dbytes, dios;
	const char *dname;
	unsigned seq;
	int i;

	if (!blkg->online)
		return;

	dname = blkg_dev_name(blkg);
	if (!dname)
		return;

	seq_printf(s, "%s ", dname);

	do {
		seq = u64_stats_fetch_begin(&bis->sync);

		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
		rios = bis->cur.ios[BLKG_IOSTAT_READ];
		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
	} while (u64_stats_fetch_retry(&bis->sync, seq));

	if (rbytes || wbytes || rios || wios) {
		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
			rbytes, wbytes, rios, wios,
			dbytes, dios);
	}

	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
		seq_printf(s, " use_delay=%d delay_nsec=%llu",
			atomic_read(&blkg->use_delay),
			(unsigned long long)atomic64_read(&blkg->delay_nsec));
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (!blkg->pd[i] || !pol->pd_stat_fn)
			continue;

		pol->pd_stat_fn(blkg->pd[i], s);
	}

	seq_puts(s, "\n");
}

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	if (!seq_css(sf)->parent)
		blkcg_fill_root_iostats();
	else
		css_rstat_flush(&blkcg->css);

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->q->queue_lock);
		blkcg_print_one_stat(blkg, sf);
		spin_unlock_irq(&blkg->q->queue_lock);
	}
	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
{
	return &css_to_blkcg(css)->cgwb_list;
}
#endif

/*
 * blkcg destruction is a three-stage process.
 *
 * 1. Destruction starts. The blkcg_css_offline() callback is invoked
 *    which offlines writeback. Here we tie the next stage of blkg destruction
 *    to the completion of writeback associated with the blkcg. This lets us
 *    avoid punting potentially large amounts of outstanding writeback to root
 *    while maintaining any ongoing policies. The next stage is triggered when
 *    the nr_cgwbs count goes to zero.
 *
 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 *    and handles the destruction of blkgs. Here the css reference held by
 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
 *    This work may occur in cgwb_release_workfn() on the cgwb_release
 *    workqueue. Any submitted ios that fail to get the blkg ref will be
 *    punted to the root_blkg.
 *
 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
 *    This finally frees the blkcg.
 */

/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	might_sleep();

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq,
						    blkcg_node);
		struct request_queue *q = blkg->q;

		if (need_resched() || !spin_trylock(&q->queue_lock)) {
			/*
			 * Given that the system can accumulate a huge number
			 * of blkgs in pathological cases, check to see if we
			 * need to reschedule to avoid softlockup.
			 */
			spin_unlock_irq(&blkcg->lock);
			cond_resched();
			spin_lock_irq(&blkcg->lock);
			continue;
		}

		blkg_destroy(blkg);
		spin_unlock(&q->queue_lock);
	}

	spin_unlock_irq(&blkcg->lock);
}

/**
 * blkcg_pin_online - pin online state
 * @blkcg_css: blkcg of interest
 *
 * While pinned, a blkcg is kept online. This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
{
	refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg_css: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
{
	struct blkcg *blkcg = css_to_blkcg(blkcg_css);

	do {
		struct blkcg *parent;

		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;

		parent = blkcg_parent(blkcg);
		blkcg_destroy_blkgs(blkcg);
		blkcg = parent;
	} while (blkcg);
}

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away. Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(css);

	/* put the base online pin allowing step 2 to be triggered */
	blkcg_unpin_online(css);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	free_percpu(blkcg->lhead);
	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg)
			goto unlock;
	}

	if (init_blkcg_llists(blkcg))
		goto free_blkcg;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd)
			goto free_pd_blkcg;

		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
	}

	spin_lock_init(&blkcg->lock);
	refcount_set(&blkcg->online_pin, 1);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
	free_percpu(blkcg->lhead);
free_blkcg:
	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ERR_PTR(-ENOMEM);
}

static int blkcg_css_online(struct cgroup_subsys_state *css)
{
	struct blkcg *parent = blkcg_parent(css_to_blkcg(css));

	/*
	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
	 * don't go offline while cgwbs are still active on them. Pin the
	 * parent so that offline always happens towards the root.
	 */
	if (parent)
		blkcg_pin_online(&parent->css);
	return 0;
}

void blkg_init_queue(struct request_queue *q)
{
	INIT_LIST_HEAD(&q->blkg_list);
	mutex_init(&q->blkcg_mutex);
}

int blkcg_init_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;

	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	/* spin_lock_irq can serve as RCU read-side critical section. */
	spin_lock_irq(&q->queue_lock);
	blkg = blkg_create(&blkcg_root, disk, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);

	if (preloaded)
		radix_tree_preload_end();

	return 0;

err_unlock:
	spin_unlock_irq(&q->queue_lock);
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}

void blkcg_exit_disk(struct gendisk *disk)
{
	blkg_destroy_all(disk);
	blk_throtl_exit(disk);
}

static void blkcg_exit(struct task_struct *tsk)
{
	if (tsk->throttle_disk)
		put_disk(tsk->throttle_disk);
	tsk->throttle_disk = NULL;
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_online = blkcg_css_online,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.css_rstat_flush = blkcg_rstat_flush,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @disk. Requires %GFP_KERNEL context. @disk goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @disk bypassed, so nobody would be accessing blkgs
 * from IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	unsigned int memflags;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/*
	 * A policy is allowed to be registered without pd_alloc_fn/pd_free_fn,
	 * for example, ioprio. Such a policy works at the blkcg level, not the
	 * disk level, and doesn't need to be activated.
	 */
	if (WARN_ON_ONCE(!pol->pd_alloc_fn || !pol->pd_free_fn))
		return -EINVAL;

	if (queue_is_mq(q))
		memflags = blk_mq_freeze_queue(q);
retry:
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to initialize parents first */
	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
		if (blkg == pinned_blkg) {
			pd = pd_prealloc;
			pd_prealloc = NULL;
		} else {
			pd = pol->pd_alloc_fn(disk, blkg->blkcg,
					      GFP_NOWAIT);
		}

		if (!pd) {
			/*
			 * GFP_NOWAIT failed. Free the existing one and
			 * prealloc for @blkg w/ GFP_KERNEL.
			 */
			if (pinned_blkg)
				blkg_put(pinned_blkg);
			blkg_get(blkg);
			pinned_blkg = blkg;

			spin_unlock_irq(&q->queue_lock);

			if (pd_prealloc)
				pol->pd_free_fn(pd_prealloc);
			pd_prealloc = pol->pd_alloc_fn(disk, blkg->blkcg,
						       GFP_KERNEL);
			if (pd_prealloc)
				goto retry;
			else
				goto enomem;
		}

		spin_lock(&blkg->blkcg->lock);

		pd->blkg = blkg;
		pd->plid = pol->plid;
		blkg->pd[pol->plid] = pd;

		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);

		if (pol->pd_online_fn)
			pol->pd_online_fn(pd);
		pd->online = true;

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
out:
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q, memflags);
	if (pinned_blkg)
		blkg_put(pinned_blkg);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;

enomem:
	/* alloc failed, take down everything */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;
		struct blkg_policy_data *pd;

		spin_lock(&blkcg->lock);
		pd = blkg->pd[pol->plid];
		if (pd) {
			if (pd->online && pol->pd_offline_fn)
				pol->pd_offline_fn(pd);
			pd->online = false;
			pol->pd_free_fn(pd);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}
	spin_unlock_irq(&q->queue_lock);
	ret = -ENOMEM;
	goto out;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a gendisk
 * @disk: gendisk of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @disk. Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol)
{
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;
	unsigned int memflags;

	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		memflags = blk_mq_freeze_queue(q);

	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		if (blkg->pd[pol->plid]) {
			if (blkg->pd[pol->plid]->online && pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(&q->queue_lock);
	mutex_unlock(&q->blkcg_mutex);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q, memflags);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

static void blkcg_free_all_cpd(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
		if (blkcg->cpd[pol->plid]) {
			pol->cpd_free_fn(blkcg->cpd[pol->plid]);
			blkcg->cpd[pol->plid] = NULL;
		}
	}
}

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	/*
	 * Make sure cpd_alloc_fn/cpd_free_fn and pd_alloc_fn/pd_free_fn come
	 * in pairs; a policy without pd_alloc_fn/pd_free_fn can't be
	 * activated.
	 */
	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
	    (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
		return -EINVAL;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS) {
		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd) {
				ret = -ENOMEM;
				goto err_free_cpds;
			}

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes == pol->legacy_cftypes) {
		WARN_ON(cgroup_add_cftypes(&io_cgrp_subsys,
					   pol->dfl_cftypes));
	} else {
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	}
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn)
		blkcg_free_all_cpd(pol);

	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

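/*
 * A minimal sketch of a policy module pairing the register/unregister
 * calls above; the policy definition is hypothetical and elides the
 * cftypes and pd callbacks a real policy would provide:
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_policy);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&example_policy);
 *	}
 *
 * A disk-level policy is then enabled on a particular disk with
 * blkcg_activate_policy(disk, &example_policy) and undone with
 * blkcg_deactivate_policy().
 */
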
/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay. We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/* negative use_delay means no scaling, see blkcg_set_delay() */
	if (atomic_read(&blkg->use_delay) < 0)
		return;

	/*
	 * We only want to scale down every second. The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window. We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled. We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward. blkg->last_use keeps track of
	 * the use_delay counter. The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway. Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}

/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay. This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	bool clamp;
	u64 now = blk_time_get_ns();
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		int use_delay = atomic_read(&blkg->use_delay);

		if (use_delay) {
			u64 this_delay;

			blkcg_scale_delay(blkg, now);
			this_delay = atomic64_read(&blkg->delay_nsec);
			if (this_delay > delay_nsec) {
				delay_nsec = this_delay;
				clamp = use_delay > 0;
			}
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap the
	 * delays at 0.25s. If there's 10's of seconds worth of delay then the
	 * tasks will be delayed for 0.25 second for every syscall. If
	 * blkcg_set_delay() was used as indicated by negative use_delay, the
	 * caller is responsible for regulating the range.
/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	unsigned long pflags;
	bool clamp;
	u64 now = blk_time_get_ns();
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	while (blkg->parent) {
		int use_delay = atomic_read(&blkg->use_delay);

		if (use_delay) {
			u64 this_delay;

			blkcg_scale_delay(blkg, now);
			this_delay = atomic64_read(&blkg->delay_nsec);
			if (this_delay > delay_nsec) {
				delay_nsec = this_delay;
				clamp = use_delay > 0;
			}
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate tens of seconds worth of
	 * delay, and we want userspace to be able to do _something_, so cap
	 * the delays at 0.25s.  If there are tens of seconds worth of delay
	 * then the tasks will be delayed for 0.25 seconds per syscall.  If
	 * blkcg_set_delay() was used, as indicated by negative use_delay, the
	 * caller is responsible for regulating the range.
	 */
	if (clamp)
		delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	if (use_memdelay)
		psi_memstall_enter(&pflags);

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);

	if (use_memdelay)
		psi_memstall_leave(&pflags);
}

/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_disk is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is set up for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct gendisk *disk = current->throttle_disk;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!disk)
		return;

	current->throttle_disk = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	blkcg = css_to_blkcg(blkcg_css());
	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, disk->queue);
	if (!blkg)
		goto out;
	if (!blkg_tryget(blkg))
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	put_disk(disk);
	return;
out:
	rcu_read_unlock();
	/* drop the disk ref taken in blkcg_schedule_throttle() */
	put_disk(disk);
}

/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @disk: disk to throttle
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are
 * places we call this that may not have that information; the swapping code,
 * for instance, will only have a block_device at that point.  This sets
 * notify_resume for the task to check and see if it requires throttling
 * before returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and
 * only throttle once.  If the task needs to be throttled again it'll need to
 * be re-set the next time we see the task.
 */
void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (current->throttle_disk != disk) {
		if (test_bit(GD_DEAD, &disk->state))
			return;
		get_device(disk_to_dev(disk));

		if (current->throttle_disk)
			put_disk(current->throttle_disk);
		current->throttle_disk = disk;
	}

	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}
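/*
 * Example (illustrative sketch of an IO controller's completion path,
 * loosely modeled on blk-iolatency; the lat/target/disk names here are
 * hypothetical):
 *
 *	u64 now = blk_time_get_ns();
 *
 *	if (lat > target) {
 *		blkcg_add_delay(blkg, now, lat - target);
 *		blkcg_schedule_throttle(disk, true);
 *	}
 *
 * The task then sleeps off the accumulated delay via
 * blkcg_maybe_throttle_current() on its next return to user space.
 */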
/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}

/**
 * blkg_tryget_closest - try to get a blkg ref on the closest blkg
 * @bio: target bio
 * @css: target css
 *
 * As the failure mode here is to walk up the blkg tree, this ensures that the
 * blkg->parent pointers are always valid.  This returns the blkg that it
 * ended up taking a reference on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
		struct cgroup_subsys_state *css)
{
	struct blkcg_gq *blkg, *ret_blkg = NULL;

	rcu_read_lock();
	blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}
	rcu_read_unlock();

	return ret_blkg;
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  An association failure is handled by walking up
 * the blkg tree.  Therefore, the blkg associated can be anything between the
 * blkg matching @css and q->root_blkg.  This situation only happens when a
 * cgroup is dying; the remaining bios will then spill to the closest alive
 * blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
 * freed.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	if (bio->bi_blkg)
		blkg_put(bio->bi_blkg);

	if (css && css->parent) {
		bio->bi_blkg = blkg_tryget_closest(bio, css);
	} else {
		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
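/*
 * Example (illustrative sketch, roughly what cgroup writeback does when
 * attributing a bio to the cgroup that dirtied the pages; "wb" stands in
 * for a bdi_writeback the caller already holds):
 *
 *	bio_associate_blkg_from_css(bio, wb->blkcg_css);
 *
 * Any blkg previously attached to the bio is put first, so this is safe to
 * call on an already-associated bio.
 */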
/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
 * already associated, the css is reused and association redone as the
 * request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	if (blk_op_is_passthrough(bio->bi_opf))
		return;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = bio_blkcg_css(bio);
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_blkg)
		bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);

/* Map a bio to the iostat row it should be accounted under. */
static int blk_cgroup_io_type(struct bio *bio)
{
	if (op_is_discard(bio->bi_opf))
		return BLKG_IOSTAT_DISCARD;
	if (op_is_write(bio->bi_opf))
		return BLKG_IOSTAT_WRITE;
	return BLKG_IOSTAT_READ;
}

void blk_cgroup_bio_start(struct bio *bio)
{
	struct blkcg *blkcg = bio->bi_blkg->blkcg;
	int rwd = blk_cgroup_io_type(bio), cpu;
	struct blkg_iostat_set *bis;
	unsigned long flags;

	if (!cgroup_subsys_on_dfl(io_cgrp_subsys))
		return;

	/* Root-level stats are sourced from system-wide IO stats */
	if (!cgroup_parent(blkcg->css.cgroup))
		return;

	cpu = get_cpu();
	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
	flags = u64_stats_update_begin_irqsave(&bis->sync);

	/*
	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
	 * bio and we would have already accounted for the size of the bio.
	 */
	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
		bio_set_flag(bio, BIO_CGROUP_ACCT);
		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
	}
	bis->cur.ios[rwd]++;

	/*
	 * If the iostat_cpu isn't in a lockless list, put it into the
	 * list to indicate that a stat update is pending.
	 */
	if (!READ_ONCE(bis->lqueued)) {
		struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);

		llist_add(&bis->lnode, lhead);
		WRITE_ONCE(bis->lqueued, true);
	}

	u64_stats_update_end_irqrestore(&bis->sync, flags);
	css_rstat_updated(&blkcg->css, cpu);
	put_cpu();
}

/* Return true if any blkcg in current's hierarchy is marked congested. */
bool blk_cgroup_congested(void)
{
	struct blkcg *blkcg;
	bool ret = false;

	rcu_read_lock();
	for (blkcg = css_to_blkcg(blkcg_css()); blkcg;
	     blkcg = blkcg_parent(blkcg)) {
		if (atomic_read(&blkcg->congestion_count)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
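/*
 * Example (illustrative sketch, condensed from the swap code's
 * swaprate-throttling path): a caller that backs off a task whose cgroup is
 * congested by scheduling a throttle against the swap disk; "si" stands in
 * for a swap_info_struct the caller already has.
 *
 *	if (blk_cgroup_congested())
 *		blkcg_schedule_throttle(si->bdev->bd_disk, true);
 */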