/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>
#include "blk.h"

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkcg_gq		       *blkg;
	struct llist_node		lnode;
	int				lqueued;	/* queued in llist */
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
#endif
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;
	/* If there is block congestion on this cgroup. */
	atomic_t			congestion_count;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;

	/*
	 * List of updated percpu blkg_iostat_set's since the last flush.
	 */
	struct llist_head __percpu	*lhead;

#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
	bool				online;
};
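
/*
 * Illustrative sketch (not part of this header): a policy would typically
 * embed blkg_policy_data as the first member of its own per-blkg structure,
 * as described above, and recover the outer structure with container_of().
 * All "my_*" names below are hypothetical:
 *
 *	struct my_blkg_data {
 *		struct blkg_policy_data pd;	// embedded at the beginning
 *		u64 bytes_dispatched;		// policy-private state
 *	};
 *
 *	static inline struct my_blkg_data *pd_to_my(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct my_blkg_data, pd) : NULL;
 *	}
 *
 * The same embedding pattern applies to blkcg_policy_data below.
 */
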
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
		struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

void blkg_init_queue(struct request_queue *q);
int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	char				*input;
	char				*body;
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
};

void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_exit(struct blkg_conf_ctx *ctx);
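
/*
 * Illustrative sketch (hypothetical names): a policy instance is a struct
 * blkcg_policy wired to its callbacks and registered during init; plid is
 * assigned by blkcg_policy_register():
 *
 *	static struct blkcg_policy blkcg_policy_my = {
 *		.pd_alloc_fn	= my_pd_alloc,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_free_fn	= my_pd_free,
 *	};
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_my);
 *	}
 */
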
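
/*
 * Illustrative sketch of the intended blkg_conf_ctx call sequence, as a
 * policy's cgroup-file write handler might use it (the handler itself is
 * hypothetical):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, buf);		// stash the user input
 *	ret = blkg_conf_prep(blkcg, pol, &ctx);	// resolve bdev and blkg
 *	if (!ret) {
 *		// parse ctx.body and apply the new setting to ctx.blkg
 *	}
 *	blkg_conf_exit(&ctx);			// release what prep acquired
 */
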
/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: the target &bio
 *
 * Return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as
 * if it were attached to the root blkg, and then backcharge to the actual
 * owning blkg.  The idea is we do bio_blkcg_css() to look up the actual
 * context for the bio and attach the appropriate blkg to the bio.  Then we
 * call this helper and if it is true run with the root blkg for that queue
 * and then do any backcharging to the originating cgroup once the io is
 * complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference_check(blkcg->blkg_hint,
			lockdep_is_held(&q->queue_lock));
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
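
/*
 * Illustrative sketch: combining blkg_lookup() with blkg_tryget() is how a
 * blkg found under RCU can be pinned for use after the read-side critical
 * section ends (variable names are hypothetical):
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (!blkg_tryget(blkg))		// blkg may be mid-teardown
 *		blkg = NULL;
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		// ... use the pinned blkg ...
 *		blkg_put(blkg);
 *	}
 */
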
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))
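
/*
 * Illustrative sketch: visiting every online descendant blkg of p_blkg
 * under RCU, per the rules documented above (variable names hypothetical):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) {
 *		// p_blkg itself is visited first, then its descendants
 *	}
 *	rcu_read_unlock();
 */
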
static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end
	 * up negative and we'd already be in trouble.  We need to subtract
	 * 1 and then check to see if we were the last delay so we can drop
	 * the congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->congestion_count);
}

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root
 * should match.  The latter is necessary as we don't want to throttle e.g.
 * a metadata update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);

#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline void blkg_init_queue(struct request_queue *q) { }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct gendisk *disk,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct gendisk *disk,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif	/* _BLK_CGROUP_PRIVATE_H */