/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_PRIVATE_H
#define _BLK_CGROUP_PRIVATE_H
/*
 * block cgroup private header
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/blk-cgroup.h>
#include <linux/cgroup.h>
#include <linux/kthread.h>
#include <linux/blk-mq.h>
#include <linux/llist.h>

struct blkcg_gq;
struct blkg_policy_data;

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
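
/*
 * Illustrative sketch (not part of this header): how a large percpu_counter
 * batch like BLKG_STAT_CPU_BATCH is typically consumed.  With a batch this
 * big, updates stay in the per-cpu counters and almost never touch the
 * shared count, which is fine for statistics where per-cpu drift is
 * acceptable.  "cnt" is a hypothetical, already-initialized
 * struct percpu_counter:
 *
 *	percpu_counter_add_batch(&cnt, bio_sectors(bio) << 9,
 *				 BLKG_STAT_CPU_BATCH);
 */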

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkcg_gq		       *blkg;
	struct llist_node		lnode;
	int				lqueued;	/* queued in llist */
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
#endif
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;

	/*
	 * List of updated percpu blkg_iostat_set's since the last flush.
	 */
	struct llist_head __percpu	*lhead;

#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
	bool				online;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
				struct seq_file *s);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;

int blkcg_init_disk(struct gendisk *disk);
void blkcg_exit_disk(struct gendisk *disk);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct gendisk *disk,
			     const struct blkcg_policy *pol);
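
/*
 * Illustrative sketch (hypothetical, not part of this header): a minimal
 * policy fills in struct blkcg_policy with pd_alloc/free_fn() methods and
 * registers itself once at init time.  "example_pd", "example_pd_alloc"
 * and "example_pd_free" are made-up names:
 *
 *	struct example_pd {
 *		struct blkg_policy_data pd;	// must come first
 *		u64 some_stat;
 *	};
 *
 *	static struct blkg_policy_data *example_pd_alloc(struct gendisk *disk,
 *			struct blkcg *blkcg, gfp_t gfp)
 *	{
 *		struct example_pd *epd = kzalloc(sizeof(*epd), gfp);
 *
 *		return epd ? &epd->pd : NULL;
 *	}
 *
 *	static void example_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(container_of(pd, struct example_pd, pd));
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 * followed by blkcg_policy_register(&blkcg_policy_example) at module init
 * and blkcg_policy_unregister() on exit.  plid is assigned by registration.
 */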

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	char				*input;
	char				*body;
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
};

void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   struct blkg_conf_ctx *ctx);
void blkg_conf_exit(struct blkg_conf_ctx *ctx);
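
/*
 * Illustrative sketch of the blkg_conf_ctx lifecycle, loosely modeled on
 * how a policy ->write() handler consumes "MAJ:MIN value" input.  The
 * error handling is condensed, and "parse_and_apply" and
 * "blkcg_policy_example" are hypothetical:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	blkg_conf_init(&ctx, input);
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, &ctx);
 *	if (!ret) {
 *		// ctx.blkg is the target blkg, ctx.body the value part
 *		ret = parse_and_apply(ctx.blkg, ctx.body);
 *	}
 *	blkg_conf_exit(&ctx);
 *	return ret ?: nbytes;
 *
 * blkg_conf_exit() must always follow blkg_conf_init(), even when
 * blkg_conf_prep() fails.
 */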

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg_css() to look up the actual context for
 * the bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and, if it returns true, run with the root blkg for that queue, then do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.
 *
 * Must be called in an RCU critical section.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q != q)
		blkg = NULL;
	return blkg;
}
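
/*
 * Illustrative sketch: a typical caller wraps blkg_lookup() in an RCU read
 * section and, if it needs the blkg past that section, takes a reference
 * with blkg_tryget() (defined below).  "blkcg" and "q" are assumed valid:
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 *	// blkg, if non-NULL, is now safely referenced; drop with blkg_put()
 */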

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}
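
/*
 * Illustrative sketch: a policy that embeds blkg_policy_data at the start
 * of its own struct (as the comment above struct blkg_policy_data
 * describes) usually pairs blkg_to_pd() with a container_of() helper.
 * "example_pd" is the hypothetical struct from the earlier sketch:
 *
 *	static struct example_pd *blkg_to_epd(struct blkcg_gq *blkg,
 *					      struct blkcg_policy *pol)
 *	{
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg, pol);
 *
 *		return pd ? container_of(pd, struct example_pd, pd) : NULL;
 *	}
 */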

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))
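
/*
 * Illustrative sketch of a pre-order descendant walk.  Hypothetical use:
 * propagating a limit change from "parent_blkg" down its subtree while
 * holding rcu_read_lock() (and, to see exactly the online blkgs, the
 * blkcg or queue lock).  "update_limits" is a made-up per-blkg helper:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg) {
 *		// parent_blkg itself is visited first
 *		update_limits(blkg);
 *	}
 *	rcu_read_unlock();
 */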

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old && !atomic_try_cmpxchg(&blkg->use_delay, &old, old - 1))
		;

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}
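
/*
 * Illustrative sketch: blkcg_use_delay()/blkcg_unuse_delay() form a
 * counted on/off switch for the delay machinery; the cgroup's
 * congestion_count is bumped on the 0->1 transition and dropped on 1->0.
 * A hypothetical policy pairs them around a congestion episode
 * ("penalty_nsec" is a made-up value):
 *
 *	// congestion detected (hypothetical policy logic)
 *	blkcg_use_delay(blkg);
 *	blkcg_add_delay(blkg, now, penalty_nsec);
 *	...
 *	// congestion cleared
 *	blkcg_unuse_delay(blkg);
 */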

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_try_cmpxchg(&blkg->use_delay, &old, -1))
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_try_cmpxchg(&blkg->use_delay, &old, 0))
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
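
/*
 * Illustrative sketch: unlike the counted use/unuse interface above,
 * blkcg_set_delay()/blkcg_clear_delay() impose a fixed, undecayed delay
 * that stays until explicitly cleared.  A hypothetical policy enforcing a
 * hard throttle might do:
 *
 *	blkcg_set_delay(blkg, 10 * NSEC_PER_MSEC);	// throttle on
 *	...
 *	blkcg_clear_delay(blkg);			// throttle off
 *
 * The two interfaces must not be mixed on the same blkg.
 */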

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match. The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

struct blkcg {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct gendisk *disk,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct gendisk *disk,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) { return 0; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */

#endif /* _BLK_CGROUP_PRIVATE_H */